aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2013-02-18 12:35:42 -0500
committerDavid S. Miller <davem@davemloft.net>2013-02-18 12:35:42 -0500
commit2f219d5fb1184bda8398e5ab9fb2910a202e2c94 (patch)
treef5773b7c41cae27aa2ce0ec3acdbb097121dcd47
parent96b45cbd956ce83908378d87d009b05645353f22 (diff)
parent990a3158002ff19b2d665334bc7a21a8887a123d (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says: ==================== This series contains updates to e1000, e1000e, igb, igbvf and ixgbe. The e1000, e1000e, igb and igbvf are single patch changes and the remaining 11 patches are all against ixgbe. The e1000 patch is a comment cleanup to align e1000 with the code commenting style for /drivers/net. It also contains a few other white space cleanups (i.e. fix lines over 80 char, remove unnecessary blank lines and fix the use of tabs/spaces). The e1000e patch from Koki (Fujitsu) adds a warning when link speed is downgraded due to SmartSpeed. The igb patch from Stefan (Red Hat) increases the timeout in the ethtool offline self-test because some i350 adapters would sometimes fail the self-test because link auto negotiation may take longer than the current 4 second timeout. The igbvf patch from Alex is meant to address several race issues that become possible because next_to_watch could possibly be set to a value that shows that the descriptor is done when it is not. In order to correct that we instead make next_to_watch a pointer that is set to NULL during cleanup, and set to the eop_desc after the descriptor rings have been written. The remaining patches for ixgbe are a mix of fixes and added support as well as some cleanup. Most notably is the added support for displaying the number of Tx/Rx channels via ethtool by Alex. Also Aurélien adds the ability for reading data from SFP+ modules over i2c for diagnostic monitoring. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h65
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c140
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c558
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c322
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c29
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c7
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c52
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c257
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c125
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c36
14 files changed, 973 insertions, 660 deletions
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 2b6cd02bfba0..26d9cd59ec75 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -81,68 +81,69 @@ struct e1000_adapter;
81 81
82#include "e1000_hw.h" 82#include "e1000_hw.h"
83 83
84#define E1000_MAX_INTR 10 84#define E1000_MAX_INTR 10
85 85
86/* TX/RX descriptor defines */ 86/* TX/RX descriptor defines */
87#define E1000_DEFAULT_TXD 256 87#define E1000_DEFAULT_TXD 256
88#define E1000_MAX_TXD 256 88#define E1000_MAX_TXD 256
89#define E1000_MIN_TXD 48 89#define E1000_MIN_TXD 48
90#define E1000_MAX_82544_TXD 4096 90#define E1000_MAX_82544_TXD 4096
91 91
92#define E1000_DEFAULT_RXD 256 92#define E1000_DEFAULT_RXD 256
93#define E1000_MAX_RXD 256 93#define E1000_MAX_RXD 256
94#define E1000_MIN_RXD 48 94#define E1000_MIN_RXD 48
95#define E1000_MAX_82544_RXD 4096 95#define E1000_MAX_82544_RXD 4096
96 96
97#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ 97#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
98#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ 98#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
99 99
100/* this is the size past which hardware will drop packets when setting LPE=0 */ 100/* this is the size past which hardware will drop packets when setting LPE=0 */
101#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 101#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
102 102
103/* Supported Rx Buffer Sizes */ 103/* Supported Rx Buffer Sizes */
104#define E1000_RXBUFFER_128 128 /* Used for packet split */ 104#define E1000_RXBUFFER_128 128 /* Used for packet split */
105#define E1000_RXBUFFER_256 256 /* Used for packet split */ 105#define E1000_RXBUFFER_256 256 /* Used for packet split */
106#define E1000_RXBUFFER_512 512 106#define E1000_RXBUFFER_512 512
107#define E1000_RXBUFFER_1024 1024 107#define E1000_RXBUFFER_1024 1024
108#define E1000_RXBUFFER_2048 2048 108#define E1000_RXBUFFER_2048 2048
109#define E1000_RXBUFFER_4096 4096 109#define E1000_RXBUFFER_4096 4096
110#define E1000_RXBUFFER_8192 8192 110#define E1000_RXBUFFER_8192 8192
111#define E1000_RXBUFFER_16384 16384 111#define E1000_RXBUFFER_16384 16384
112 112
113/* SmartSpeed delimiters */ 113/* SmartSpeed delimiters */
114#define E1000_SMARTSPEED_DOWNSHIFT 3 114#define E1000_SMARTSPEED_DOWNSHIFT 3
115#define E1000_SMARTSPEED_MAX 15 115#define E1000_SMARTSPEED_MAX 15
116 116
117/* Packet Buffer allocations */ 117/* Packet Buffer allocations */
118#define E1000_PBA_BYTES_SHIFT 0xA 118#define E1000_PBA_BYTES_SHIFT 0xA
119#define E1000_TX_HEAD_ADDR_SHIFT 7 119#define E1000_TX_HEAD_ADDR_SHIFT 7
120#define E1000_PBA_TX_MASK 0xFFFF0000 120#define E1000_PBA_TX_MASK 0xFFFF0000
121 121
122/* Flow Control Watermarks */ 122/* Flow Control Watermarks */
123#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ 123#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
124#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ 124#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
125 125
126#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ 126#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
127 127
128/* How many Tx Descriptors do we need to call netif_wake_queue ? */ 128/* How many Tx Descriptors do we need to call netif_wake_queue ? */
129#define E1000_TX_QUEUE_WAKE 16 129#define E1000_TX_QUEUE_WAKE 16
130/* How many Rx Buffers do we bundle into one write to the hardware ? */ 130/* How many Rx Buffers do we bundle into one write to the hardware ? */
131#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 131#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
132 132
133#define AUTO_ALL_MODES 0 133#define AUTO_ALL_MODES 0
134#define E1000_EEPROM_82544_APM 0x0004 134#define E1000_EEPROM_82544_APM 0x0004
135#define E1000_EEPROM_APME 0x0400 135#define E1000_EEPROM_APME 0x0400
136 136
137#ifndef E1000_MASTER_SLAVE 137#ifndef E1000_MASTER_SLAVE
138/* Switch to override PHY master/slave setting */ 138/* Switch to override PHY master/slave setting */
139#define E1000_MASTER_SLAVE e1000_ms_hw_default 139#define E1000_MASTER_SLAVE e1000_ms_hw_default
140#endif 140#endif
141 141
142#define E1000_MNG_VLAN_NONE (-1) 142#define E1000_MNG_VLAN_NONE (-1)
143 143
144/* wrapper around a pointer to a socket buffer, 144/* wrapper around a pointer to a socket buffer,
145 * so a DMA handle can be stored along with the buffer */ 145 * so a DMA handle can be stored along with the buffer
146 */
146struct e1000_buffer { 147struct e1000_buffer {
147 struct sk_buff *skb; 148 struct sk_buff *skb;
148 dma_addr_t dma; 149 dma_addr_t dma;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 14e30515f6aa..43462d596a4e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -115,12 +115,12 @@ static int e1000_get_settings(struct net_device *netdev,
115 if (hw->media_type == e1000_media_type_copper) { 115 if (hw->media_type == e1000_media_type_copper) {
116 116
117 ecmd->supported = (SUPPORTED_10baseT_Half | 117 ecmd->supported = (SUPPORTED_10baseT_Half |
118 SUPPORTED_10baseT_Full | 118 SUPPORTED_10baseT_Full |
119 SUPPORTED_100baseT_Half | 119 SUPPORTED_100baseT_Half |
120 SUPPORTED_100baseT_Full | 120 SUPPORTED_100baseT_Full |
121 SUPPORTED_1000baseT_Full| 121 SUPPORTED_1000baseT_Full|
122 SUPPORTED_Autoneg | 122 SUPPORTED_Autoneg |
123 SUPPORTED_TP); 123 SUPPORTED_TP);
124 ecmd->advertising = ADVERTISED_TP; 124 ecmd->advertising = ADVERTISED_TP;
125 125
126 if (hw->autoneg == 1) { 126 if (hw->autoneg == 1) {
@@ -161,8 +161,8 @@ static int e1000_get_settings(struct net_device *netdev,
161 ethtool_cmd_speed_set(ecmd, adapter->link_speed); 161 ethtool_cmd_speed_set(ecmd, adapter->link_speed);
162 162
163 /* unfortunately FULL_DUPLEX != DUPLEX_FULL 163 /* unfortunately FULL_DUPLEX != DUPLEX_FULL
164 * and HALF_DUPLEX != DUPLEX_HALF */ 164 * and HALF_DUPLEX != DUPLEX_HALF
165 165 */
166 if (adapter->link_duplex == FULL_DUPLEX) 166 if (adapter->link_duplex == FULL_DUPLEX)
167 ecmd->duplex = DUPLEX_FULL; 167 ecmd->duplex = DUPLEX_FULL;
168 else 168 else
@@ -179,8 +179,7 @@ static int e1000_get_settings(struct net_device *netdev,
179 if ((hw->media_type == e1000_media_type_copper) && 179 if ((hw->media_type == e1000_media_type_copper) &&
180 netif_carrier_ok(netdev)) 180 netif_carrier_ok(netdev))
181 ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? 181 ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
182 ETH_TP_MDI_X : 182 ETH_TP_MDI_X : ETH_TP_MDI);
183 ETH_TP_MDI);
184 else 183 else
185 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; 184 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
186 185
@@ -197,8 +196,7 @@ static int e1000_set_settings(struct net_device *netdev,
197 struct e1000_adapter *adapter = netdev_priv(netdev); 196 struct e1000_adapter *adapter = netdev_priv(netdev);
198 struct e1000_hw *hw = &adapter->hw; 197 struct e1000_hw *hw = &adapter->hw;
199 198
200 /* 199 /* MDI setting is only allowed when autoneg enabled because
201 * MDI setting is only allowed when autoneg enabled because
202 * some hardware doesn't allow MDI setting when speed or 200 * some hardware doesn't allow MDI setting when speed or
203 * duplex is forced. 201 * duplex is forced.
204 */ 202 */
@@ -224,8 +222,8 @@ static int e1000_set_settings(struct net_device *netdev,
224 ADVERTISED_Autoneg; 222 ADVERTISED_Autoneg;
225 else 223 else
226 hw->autoneg_advertised = ecmd->advertising | 224 hw->autoneg_advertised = ecmd->advertising |
227 ADVERTISED_TP | 225 ADVERTISED_TP |
228 ADVERTISED_Autoneg; 226 ADVERTISED_Autoneg;
229 ecmd->advertising = hw->autoneg_advertised; 227 ecmd->advertising = hw->autoneg_advertised;
230 } else { 228 } else {
231 u32 speed = ethtool_cmd_speed(ecmd); 229 u32 speed = ethtool_cmd_speed(ecmd);
@@ -260,8 +258,7 @@ static u32 e1000_get_link(struct net_device *netdev)
260{ 258{
261 struct e1000_adapter *adapter = netdev_priv(netdev); 259 struct e1000_adapter *adapter = netdev_priv(netdev);
262 260
263 /* 261 /* If the link is not reported up to netdev, interrupts are disabled,
264 * If the link is not reported up to netdev, interrupts are disabled,
265 * and so the physical link state may have changed since we last 262 * and so the physical link state may have changed since we last
266 * looked. Set get_link_status to make sure that the true link 263 * looked. Set get_link_status to make sure that the true link
267 * state is interrogated, rather than pulling a cached and possibly 264 * state is interrogated, rather than pulling a cached and possibly
@@ -484,7 +481,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
484 le16_to_cpus(&eeprom_buff[i]); 481 le16_to_cpus(&eeprom_buff[i]);
485 482
486 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), 483 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
487 eeprom->len); 484 eeprom->len);
488 kfree(eeprom_buff); 485 kfree(eeprom_buff);
489 486
490 return ret_val; 487 return ret_val;
@@ -517,15 +514,17 @@ static int e1000_set_eeprom(struct net_device *netdev,
517 ptr = (void *)eeprom_buff; 514 ptr = (void *)eeprom_buff;
518 515
519 if (eeprom->offset & 1) { 516 if (eeprom->offset & 1) {
520 /* need read/modify/write of first changed EEPROM word */ 517 /* need read/modify/write of first changed EEPROM word
521 /* only the second byte of the word is being modified */ 518 * only the second byte of the word is being modified
519 */
522 ret_val = e1000_read_eeprom(hw, first_word, 1, 520 ret_val = e1000_read_eeprom(hw, first_word, 1,
523 &eeprom_buff[0]); 521 &eeprom_buff[0]);
524 ptr++; 522 ptr++;
525 } 523 }
526 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { 524 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
527 /* need read/modify/write of last changed EEPROM word */ 525 /* need read/modify/write of last changed EEPROM word
528 /* only the first byte of the word is being modified */ 526 * only the first byte of the word is being modified
527 */
529 ret_val = e1000_read_eeprom(hw, last_word, 1, 528 ret_val = e1000_read_eeprom(hw, last_word, 1,
530 &eeprom_buff[last_word - first_word]); 529 &eeprom_buff[last_word - first_word]);
531 } 530 }
@@ -606,11 +605,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
606 rx_old = adapter->rx_ring; 605 rx_old = adapter->rx_ring;
607 606
608 err = -ENOMEM; 607 err = -ENOMEM;
609 txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL); 608 txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring),
609 GFP_KERNEL);
610 if (!txdr) 610 if (!txdr)
611 goto err_alloc_tx; 611 goto err_alloc_tx;
612 612
613 rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL); 613 rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring),
614 GFP_KERNEL);
614 if (!rxdr) 615 if (!rxdr)
615 goto err_alloc_rx; 616 goto err_alloc_rx;
616 617
@@ -619,12 +620,12 @@ static int e1000_set_ringparam(struct net_device *netdev,
619 620
620 rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD); 621 rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
621 rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ? 622 rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
622 E1000_MAX_RXD : E1000_MAX_82544_RXD)); 623 E1000_MAX_RXD : E1000_MAX_82544_RXD));
623 rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); 624 rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
624 625
625 txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD); 626 txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
626 txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ? 627 txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
627 E1000_MAX_TXD : E1000_MAX_82544_TXD)); 628 E1000_MAX_TXD : E1000_MAX_82544_TXD));
628 txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); 629 txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
629 630
630 for (i = 0; i < adapter->num_tx_queues; i++) 631 for (i = 0; i < adapter->num_tx_queues; i++)
@@ -642,7 +643,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
642 goto err_setup_tx; 643 goto err_setup_tx;
643 644
644 /* save the new, restore the old in order to free it, 645 /* save the new, restore the old in order to free it,
645 * then restore the new back again */ 646 * then restore the new back again
647 */
646 648
647 adapter->rx_ring = rx_old; 649 adapter->rx_ring = rx_old;
648 adapter->tx_ring = tx_old; 650 adapter->tx_ring = tx_old;
@@ -784,7 +786,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
784 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); 786 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
785 787
786 if (hw->mac_type >= e1000_82543) { 788 if (hw->mac_type >= e1000_82543) {
787
788 REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); 789 REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
789 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 790 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
790 REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); 791 REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
@@ -795,14 +796,11 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
795 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, 796 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
796 0xFFFFFFFF); 797 0xFFFFFFFF);
797 } 798 }
798
799 } else { 799 } else {
800
801 REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); 800 REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
802 REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); 801 REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
803 REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); 802 REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
804 REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); 803 REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
805
806 } 804 }
807 805
808 value = E1000_MC_TBL_SIZE; 806 value = E1000_MC_TBL_SIZE;
@@ -858,13 +856,14 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
858 856
859 *data = 0; 857 *data = 0;
860 858
861 /* NOTE: we don't test MSI interrupts here, yet */ 859 /* NOTE: we don't test MSI interrupts here, yet
862 /* Hook up test interrupt handler just for this test */ 860 * Hook up test interrupt handler just for this test
861 */
863 if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, 862 if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
864 netdev)) 863 netdev))
865 shared_int = false; 864 shared_int = false;
866 else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, 865 else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
867 netdev->name, netdev)) { 866 netdev->name, netdev)) {
868 *data = 1; 867 *data = 1;
869 return -1; 868 return -1;
870 } 869 }
@@ -1253,14 +1252,15 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1253 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 1252 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1254 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1253 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1255 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1254 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1256 E1000_CTRL_FD); /* Force Duplex to FULL */ 1255 E1000_CTRL_FD); /* Force Duplex to FULL */
1257 1256
1258 if (hw->media_type == e1000_media_type_copper && 1257 if (hw->media_type == e1000_media_type_copper &&
1259 hw->phy_type == e1000_phy_m88) 1258 hw->phy_type == e1000_phy_m88)
1260 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1259 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1261 else { 1260 else {
1262 /* Set the ILOS bit on the fiber Nic is half 1261 /* Set the ILOS bit on the fiber Nic is half
1263 * duplex link is detected. */ 1262 * duplex link is detected.
1263 */
1264 stat_reg = er32(STATUS); 1264 stat_reg = er32(STATUS);
1265 if ((stat_reg & E1000_STATUS_FD) == 0) 1265 if ((stat_reg & E1000_STATUS_FD) == 0)
1266 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); 1266 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
@@ -1446,7 +1446,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1446 1446
1447 ret_val = e1000_check_lbtest_frame( 1447 ret_val = e1000_check_lbtest_frame(
1448 rxdr->buffer_info[l].skb, 1448 rxdr->buffer_info[l].skb,
1449 1024); 1449 1024);
1450 if (!ret_val) 1450 if (!ret_val)
1451 good_cnt++; 1451 good_cnt++;
1452 if (unlikely(++l == rxdr->count)) l = 0; 1452 if (unlikely(++l == rxdr->count)) l = 0;
@@ -1493,7 +1493,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1493 hw->serdes_has_link = false; 1493 hw->serdes_has_link = false;
1494 1494
1495 /* On some blade server designs, link establishment 1495 /* On some blade server designs, link establishment
1496 * could take as long as 2-3 minutes */ 1496 * could take as long as 2-3 minutes
1497 */
1497 do { 1498 do {
1498 e1000_check_for_link(hw); 1499 e1000_check_for_link(hw);
1499 if (hw->serdes_has_link) 1500 if (hw->serdes_has_link)
@@ -1545,7 +1546,8 @@ static void e1000_diag_test(struct net_device *netdev,
1545 e_info(hw, "offline testing starting\n"); 1546 e_info(hw, "offline testing starting\n");
1546 1547
1547 /* Link test performed before hardware reset so autoneg doesn't 1548 /* Link test performed before hardware reset so autoneg doesn't
1548 * interfere with test result */ 1549 * interfere with test result
1550 */
1549 if (e1000_link_test(adapter, &data[4])) 1551 if (e1000_link_test(adapter, &data[4]))
1550 eth_test->flags |= ETH_TEST_FL_FAILED; 1552 eth_test->flags |= ETH_TEST_FL_FAILED;
1551 1553
@@ -1639,7 +1641,8 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
1639 default: 1641 default:
1640 /* dual port cards only support WoL on port A from now on 1642 /* dual port cards only support WoL on port A from now on
1641 * unless it was enabled in the eeprom for port B 1643 * unless it was enabled in the eeprom for port B
1642 * so exclude FUNC_1 ports from having WoL enabled */ 1644 * so exclude FUNC_1 ports from having WoL enabled
1645 */
1643 if (er32(STATUS) & E1000_STATUS_FUNC_1 && 1646 if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
1644 !adapter->eeprom_wol) { 1647 !adapter->eeprom_wol) {
1645 wol->supported = 0; 1648 wol->supported = 0;
@@ -1663,7 +1666,8 @@ static void e1000_get_wol(struct net_device *netdev,
1663 wol->wolopts = 0; 1666 wol->wolopts = 0;
1664 1667
1665 /* this function will set ->supported = 0 and return 1 if wol is not 1668 /* this function will set ->supported = 0 and return 1 if wol is not
1666 * supported by this hardware */ 1669 * supported by this hardware
1670 */
1667 if (e1000_wol_exclusion(adapter, wol) || 1671 if (e1000_wol_exclusion(adapter, wol) ||
1668 !device_can_wakeup(&adapter->pdev->dev)) 1672 !device_can_wakeup(&adapter->pdev->dev))
1669 return; 1673 return;
@@ -1839,7 +1843,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1839 data[i] = (e1000_gstrings_stats[i].sizeof_stat == 1843 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
1840 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1844 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1841 } 1845 }
1842/* BUG_ON(i != E1000_STATS_LEN); */ 1846/* BUG_ON(i != E1000_STATS_LEN); */
1843} 1847}
1844 1848
1845static void e1000_get_strings(struct net_device *netdev, u32 stringset, 1849static void e1000_get_strings(struct net_device *netdev, u32 stringset,
@@ -1859,37 +1863,37 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
1859 ETH_GSTRING_LEN); 1863 ETH_GSTRING_LEN);
1860 p += ETH_GSTRING_LEN; 1864 p += ETH_GSTRING_LEN;
1861 } 1865 }
1862/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ 1866 /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
1863 break; 1867 break;
1864 } 1868 }
1865} 1869}
1866 1870
1867static const struct ethtool_ops e1000_ethtool_ops = { 1871static const struct ethtool_ops e1000_ethtool_ops = {
1868 .get_settings = e1000_get_settings, 1872 .get_settings = e1000_get_settings,
1869 .set_settings = e1000_set_settings, 1873 .set_settings = e1000_set_settings,
1870 .get_drvinfo = e1000_get_drvinfo, 1874 .get_drvinfo = e1000_get_drvinfo,
1871 .get_regs_len = e1000_get_regs_len, 1875 .get_regs_len = e1000_get_regs_len,
1872 .get_regs = e1000_get_regs, 1876 .get_regs = e1000_get_regs,
1873 .get_wol = e1000_get_wol, 1877 .get_wol = e1000_get_wol,
1874 .set_wol = e1000_set_wol, 1878 .set_wol = e1000_set_wol,
1875 .get_msglevel = e1000_get_msglevel, 1879 .get_msglevel = e1000_get_msglevel,
1876 .set_msglevel = e1000_set_msglevel, 1880 .set_msglevel = e1000_set_msglevel,
1877 .nway_reset = e1000_nway_reset, 1881 .nway_reset = e1000_nway_reset,
1878 .get_link = e1000_get_link, 1882 .get_link = e1000_get_link,
1879 .get_eeprom_len = e1000_get_eeprom_len, 1883 .get_eeprom_len = e1000_get_eeprom_len,
1880 .get_eeprom = e1000_get_eeprom, 1884 .get_eeprom = e1000_get_eeprom,
1881 .set_eeprom = e1000_set_eeprom, 1885 .set_eeprom = e1000_set_eeprom,
1882 .get_ringparam = e1000_get_ringparam, 1886 .get_ringparam = e1000_get_ringparam,
1883 .set_ringparam = e1000_set_ringparam, 1887 .set_ringparam = e1000_set_ringparam,
1884 .get_pauseparam = e1000_get_pauseparam, 1888 .get_pauseparam = e1000_get_pauseparam,
1885 .set_pauseparam = e1000_set_pauseparam, 1889 .set_pauseparam = e1000_set_pauseparam,
1886 .self_test = e1000_diag_test, 1890 .self_test = e1000_diag_test,
1887 .get_strings = e1000_get_strings, 1891 .get_strings = e1000_get_strings,
1888 .set_phys_id = e1000_set_phys_id, 1892 .set_phys_id = e1000_set_phys_id,
1889 .get_ethtool_stats = e1000_get_ethtool_stats, 1893 .get_ethtool_stats = e1000_get_ethtool_stats,
1890 .get_sset_count = e1000_get_sset_count, 1894 .get_sset_count = e1000_get_sset_count,
1891 .get_coalesce = e1000_get_coalesce, 1895 .get_coalesce = e1000_get_coalesce,
1892 .set_coalesce = e1000_set_coalesce, 1896 .set_coalesce = e1000_set_coalesce,
1893 .get_ts_info = ethtool_op_get_ts_info, 1897 .get_ts_info = ethtool_op_get_ts_info,
1894}; 1898};
1895 1899
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 8fedd2451538..2879b9631e15 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -164,8 +164,9 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
164 if (hw->phy_init_script) { 164 if (hw->phy_init_script) {
165 msleep(20); 165 msleep(20);
166 166
167 /* Save off the current value of register 0x2F5B to be restored at 167 /* Save off the current value of register 0x2F5B to be restored
168 * the end of this routine. */ 168 * at the end of this routine.
169 */
169 ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); 170 ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
170 171
171 /* Disabled the PHY transmitter */ 172 /* Disabled the PHY transmitter */
@@ -466,7 +467,8 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
466 case e1000_82541: 467 case e1000_82541:
467 case e1000_82541_rev_2: 468 case e1000_82541_rev_2:
468 /* These controllers can't ack the 64-bit write when issuing the 469 /* These controllers can't ack the 64-bit write when issuing the
469 * reset, so use IO-mapping as a workaround to issue the reset */ 470 * reset, so use IO-mapping as a workaround to issue the reset
471 */
470 E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); 472 E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
471 break; 473 break;
472 case e1000_82545_rev_3: 474 case e1000_82545_rev_3:
@@ -480,9 +482,9 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
480 break; 482 break;
481 } 483 }
482 484
483 /* After MAC reset, force reload of EEPROM to restore power-on settings to 485 /* After MAC reset, force reload of EEPROM to restore power-on settings
484 * device. Later controllers reload the EEPROM automatically, so just wait 486 * to device. Later controllers reload the EEPROM automatically, so
485 * for reload to complete. 487 * just wait for reload to complete.
486 */ 488 */
487 switch (hw->mac_type) { 489 switch (hw->mac_type) {
488 case e1000_82542_rev2_0: 490 case e1000_82542_rev2_0:
@@ -591,8 +593,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
591 msleep(5); 593 msleep(5);
592 } 594 }
593 595
594 /* Setup the receive address. This involves initializing all of the Receive 596 /* Setup the receive address. This involves initializing all of the
595 * Address Registers (RARs 0 - 15). 597 * Receive Address Registers (RARs 0 - 15).
596 */ 598 */
597 e1000_init_rx_addrs(hw); 599 e1000_init_rx_addrs(hw);
598 600
@@ -611,7 +613,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
611 for (i = 0; i < mta_size; i++) { 613 for (i = 0; i < mta_size; i++) {
612 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 614 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
613 /* use write flush to prevent Memory Write Block (MWB) from 615 /* use write flush to prevent Memory Write Block (MWB) from
614 * occurring when accessing our register space */ 616 * occurring when accessing our register space
617 */
615 E1000_WRITE_FLUSH(); 618 E1000_WRITE_FLUSH();
616 } 619 }
617 620
@@ -630,7 +633,9 @@ s32 e1000_init_hw(struct e1000_hw *hw)
630 case e1000_82546_rev_3: 633 case e1000_82546_rev_3:
631 break; 634 break;
632 default: 635 default:
633 /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */ 636 /* Workaround for PCI-X problem when BIOS sets MMRBC
637 * incorrectly.
638 */
634 if (hw->bus_type == e1000_bus_type_pcix 639 if (hw->bus_type == e1000_bus_type_pcix
635 && e1000_pcix_get_mmrbc(hw) > 2048) 640 && e1000_pcix_get_mmrbc(hw) > 2048)
636 e1000_pcix_set_mmrbc(hw, 2048); 641 e1000_pcix_set_mmrbc(hw, 2048);
@@ -660,7 +665,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
660 hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { 665 hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
661 ctrl_ext = er32(CTRL_EXT); 666 ctrl_ext = er32(CTRL_EXT);
662 /* Relaxed ordering must be disabled to avoid a parity 667 /* Relaxed ordering must be disabled to avoid a parity
663 * error crash in a PCI slot. */ 668 * error crash in a PCI slot.
669 */
664 ctrl_ext |= E1000_CTRL_EXT_RO_DIS; 670 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
665 ew32(CTRL_EXT, ctrl_ext); 671 ew32(CTRL_EXT, ctrl_ext);
666 } 672 }
@@ -810,8 +816,9 @@ s32 e1000_setup_link(struct e1000_hw *hw)
810 ew32(FCRTL, 0); 816 ew32(FCRTL, 0);
811 ew32(FCRTH, 0); 817 ew32(FCRTH, 0);
812 } else { 818 } else {
813 /* We need to set up the Receive Threshold high and low water marks 819 /* We need to set up the Receive Threshold high and low water
814 * as well as (optionally) enabling the transmission of XON frames. 820 * marks as well as (optionally) enabling the transmission of
821 * XON frames.
815 */ 822 */
816 if (hw->fc_send_xon) { 823 if (hw->fc_send_xon) {
817 ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); 824 ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
@@ -868,42 +875,46 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
868 e1000_config_collision_dist(hw); 875 e1000_config_collision_dist(hw);
869 876
870 /* Check for a software override of the flow control settings, and setup 877 /* Check for a software override of the flow control settings, and setup
871 * the device accordingly. If auto-negotiation is enabled, then software 878 * the device accordingly. If auto-negotiation is enabled, then
872 * will have to set the "PAUSE" bits to the correct value in the Tranmsit 879 * software will have to set the "PAUSE" bits to the correct value in
873 * Config Word Register (TXCW) and re-start auto-negotiation. However, if 880 * the Tranmsit Config Word Register (TXCW) and re-start
874 * auto-negotiation is disabled, then software will have to manually 881 * auto-negotiation. However, if auto-negotiation is disabled, then
875 * configure the two flow control enable bits in the CTRL register. 882 * software will have to manually configure the two flow control enable
883 * bits in the CTRL register.
876 * 884 *
877 * The possible values of the "fc" parameter are: 885 * The possible values of the "fc" parameter are:
878 * 0: Flow control is completely disabled 886 * 0: Flow control is completely disabled
879 * 1: Rx flow control is enabled (we can receive pause frames, but 887 * 1: Rx flow control is enabled (we can receive pause frames, but
880 * not send pause frames). 888 * not send pause frames).
881 * 2: Tx flow control is enabled (we can send pause frames but we do 889 * 2: Tx flow control is enabled (we can send pause frames but we do
882 * not support receiving pause frames). 890 * not support receiving pause frames).
883 * 3: Both Rx and TX flow control (symmetric) are enabled. 891 * 3: Both Rx and TX flow control (symmetric) are enabled.
884 */ 892 */
885 switch (hw->fc) { 893 switch (hw->fc) {
886 case E1000_FC_NONE: 894 case E1000_FC_NONE:
887 /* Flow control is completely disabled by a software over-ride. */ 895 /* Flow ctrl is completely disabled by a software over-ride */
888 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); 896 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
889 break; 897 break;
890 case E1000_FC_RX_PAUSE: 898 case E1000_FC_RX_PAUSE:
891 /* RX Flow control is enabled and TX Flow control is disabled by a 899 /* Rx Flow control is enabled and Tx Flow control is disabled by
892 * software over-ride. Since there really isn't a way to advertise 900 * a software over-ride. Since there really isn't a way to
893 * that we are capable of RX Pause ONLY, we will advertise that we 901 * advertise that we are capable of Rx Pause ONLY, we will
894 * support both symmetric and asymmetric RX PAUSE. Later, we will 902 * advertise that we support both symmetric and asymmetric Rx
895 * disable the adapter's ability to send PAUSE frames. 903 * PAUSE. Later, we will disable the adapter's ability to send
904 * PAUSE frames.
896 */ 905 */
897 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 906 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
898 break; 907 break;
899 case E1000_FC_TX_PAUSE: 908 case E1000_FC_TX_PAUSE:
900 /* TX Flow control is enabled, and RX Flow control is disabled, by a 909 /* Tx Flow control is enabled, and Rx Flow control is disabled,
901 * software over-ride. 910 * by a software over-ride.
902 */ 911 */
903 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); 912 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
904 break; 913 break;
905 case E1000_FC_FULL: 914 case E1000_FC_FULL:
906 /* Flow control (both RX and TX) is enabled by a software over-ride. */ 915 /* Flow control (both Rx and Tx) is enabled by a software
916 * over-ride.
917 */
907 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 918 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
908 break; 919 break;
909 default: 920 default:
@@ -912,11 +923,11 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
912 break; 923 break;
913 } 924 }
914 925
915 /* Since auto-negotiation is enabled, take the link out of reset (the link 926 /* Since auto-negotiation is enabled, take the link out of reset (the
916 * will be in reset, because we previously reset the chip). This will 927 * link will be in reset, because we previously reset the chip). This
917 * restart auto-negotiation. If auto-negotiation is successful then the 928 * will restart auto-negotiation. If auto-negotiation is successful
918 * link-up status bit will be set and the flow control enable bits (RFCE 929 * then the link-up status bit will be set and the flow control enable
919 * and TFCE) will be set according to their negotiated value. 930 * bits (RFCE and TFCE) will be set according to their negotiated value.
920 */ 931 */
921 e_dbg("Auto-negotiation enabled\n"); 932 e_dbg("Auto-negotiation enabled\n");
922 933
@@ -927,11 +938,12 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
927 hw->txcw = txcw; 938 hw->txcw = txcw;
928 msleep(1); 939 msleep(1);
929 940
930 /* If we have a signal (the cable is plugged in) then poll for a "Link-Up" 941 /* If we have a signal (the cable is plugged in) then poll for a
931 * indication in the Device Status Register. Time-out if a link isn't 942 * "Link-Up" indication in the Device Status Register. Time-out if a
932 * seen in 500 milliseconds seconds (Auto-negotiation should complete in 943 * link isn't seen in 500 milliseconds seconds (Auto-negotiation should
933 * less than 500 milliseconds even if the other end is doing it in SW). 944 * complete in less than 500 milliseconds even if the other end is doing
934 * For internal serdes, we just assume a signal is present, then poll. 945 * it in SW). For internal serdes, we just assume a signal is present,
946 * then poll.
935 */ 947 */
936 if (hw->media_type == e1000_media_type_internal_serdes || 948 if (hw->media_type == e1000_media_type_internal_serdes ||
937 (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { 949 (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
@@ -946,9 +958,9 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
946 e_dbg("Never got a valid link from auto-neg!!!\n"); 958 e_dbg("Never got a valid link from auto-neg!!!\n");
947 hw->autoneg_failed = 1; 959 hw->autoneg_failed = 1;
948 /* AutoNeg failed to achieve a link, so we'll call 960 /* AutoNeg failed to achieve a link, so we'll call
949 * e1000_check_for_link. This routine will force the link up if 961 * e1000_check_for_link. This routine will force the
950 * we detect a signal. This will allow us to communicate with 962 * link up if we detect a signal. This will allow us to
951 * non-autonegotiating link partners. 963 * communicate with non-autonegotiating link partners.
952 */ 964 */
953 ret_val = e1000_check_for_link(hw); 965 ret_val = e1000_check_for_link(hw);
954 if (ret_val) { 966 if (ret_val) {
@@ -1042,9 +1054,9 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
1042 e_dbg("e1000_copper_link_preconfig"); 1054 e_dbg("e1000_copper_link_preconfig");
1043 1055
1044 ctrl = er32(CTRL); 1056 ctrl = er32(CTRL);
1045 /* With 82543, we need to force speed and duplex on the MAC equal to what 1057 /* With 82543, we need to force speed and duplex on the MAC equal to
1046 * the PHY speed and duplex configuration is. In addition, we need to 1058 * what the PHY speed and duplex configuration is. In addition, we need
1047 * perform a hardware reset on the PHY to take it out of reset. 1059 * to perform a hardware reset on the PHY to take it out of reset.
1048 */ 1060 */
1049 if (hw->mac_type > e1000_82543) { 1061 if (hw->mac_type > e1000_82543) {
1050 ctrl |= E1000_CTRL_SLU; 1062 ctrl |= E1000_CTRL_SLU;
@@ -1175,7 +1187,8 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
1175 1187
1176 /* when autonegotiation advertisement is only 1000Mbps then we 1188 /* when autonegotiation advertisement is only 1000Mbps then we
1177 * should disable SmartSpeed and enable Auto MasterSlave 1189 * should disable SmartSpeed and enable Auto MasterSlave
1178 * resolution as hardware default. */ 1190 * resolution as hardware default.
1191 */
1179 if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { 1192 if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
1180 /* Disable SmartSpeed */ 1193 /* Disable SmartSpeed */
1181 ret_val = 1194 ret_val =
@@ -1485,13 +1498,15 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
1485 1498
1486 if (hw->autoneg) { 1499 if (hw->autoneg) {
1487 /* Setup autoneg and flow control advertisement 1500 /* Setup autoneg and flow control advertisement
1488 * and perform autonegotiation */ 1501 * and perform autonegotiation
1502 */
1489 ret_val = e1000_copper_link_autoneg(hw); 1503 ret_val = e1000_copper_link_autoneg(hw);
1490 if (ret_val) 1504 if (ret_val)
1491 return ret_val; 1505 return ret_val;
1492 } else { 1506 } else {
1493 /* PHY will be set to 10H, 10F, 100H,or 100F 1507 /* PHY will be set to 10H, 10F, 100H,or 100F
1494 * depending on value from forced_speed_duplex. */ 1508 * depending on value from forced_speed_duplex.
1509 */
1495 e_dbg("Forcing speed and duplex\n"); 1510 e_dbg("Forcing speed and duplex\n");
1496 ret_val = e1000_phy_force_speed_duplex(hw); 1511 ret_val = e1000_phy_force_speed_duplex(hw);
1497 if (ret_val) { 1512 if (ret_val) {
@@ -1609,7 +1624,8 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1609 * setup the PHY advertisement registers accordingly. If 1624 * setup the PHY advertisement registers accordingly. If
1610 * auto-negotiation is enabled, then software will have to set the 1625 * auto-negotiation is enabled, then software will have to set the
1611 * "PAUSE" bits to the correct value in the Auto-Negotiation 1626 * "PAUSE" bits to the correct value in the Auto-Negotiation
1612 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation. 1627 * Advertisement Register (PHY_AUTONEG_ADV) and re-start
1628 * auto-negotiation.
1613 * 1629 *
1614 * The possible values of the "fc" parameter are: 1630 * The possible values of the "fc" parameter are:
1615 * 0: Flow control is completely disabled 1631 * 0: Flow control is completely disabled
@@ -1636,7 +1652,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1636 * capable of RX Pause ONLY, we will advertise that we 1652 * capable of RX Pause ONLY, we will advertise that we
1637 * support both symmetric and asymmetric RX PAUSE. Later 1653 * support both symmetric and asymmetric RX PAUSE. Later
1638 * (in e1000_config_fc_after_link_up) we will disable the 1654 * (in e1000_config_fc_after_link_up) we will disable the
1639 *hw's ability to send PAUSE frames. 1655 * hw's ability to send PAUSE frames.
1640 */ 1656 */
1641 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 1657 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1642 break; 1658 break;
@@ -1720,15 +1736,15 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1720 /* Are we forcing Full or Half Duplex? */ 1736 /* Are we forcing Full or Half Duplex? */
1721 if (hw->forced_speed_duplex == e1000_100_full || 1737 if (hw->forced_speed_duplex == e1000_100_full ||
1722 hw->forced_speed_duplex == e1000_10_full) { 1738 hw->forced_speed_duplex == e1000_10_full) {
1723 /* We want to force full duplex so we SET the full duplex bits in the 1739 /* We want to force full duplex so we SET the full duplex bits
1724 * Device and MII Control Registers. 1740 * in the Device and MII Control Registers.
1725 */ 1741 */
1726 ctrl |= E1000_CTRL_FD; 1742 ctrl |= E1000_CTRL_FD;
1727 mii_ctrl_reg |= MII_CR_FULL_DUPLEX; 1743 mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
1728 e_dbg("Full Duplex\n"); 1744 e_dbg("Full Duplex\n");
1729 } else { 1745 } else {
1730 /* We want to force half duplex so we CLEAR the full duplex bits in 1746 /* We want to force half duplex so we CLEAR the full duplex bits
1731 * the Device and MII Control Registers. 1747 * in the Device and MII Control Registers.
1732 */ 1748 */
1733 ctrl &= ~E1000_CTRL_FD; 1749 ctrl &= ~E1000_CTRL_FD;
1734 mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; 1750 mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
@@ -1762,8 +1778,8 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1762 if (ret_val) 1778 if (ret_val)
1763 return ret_val; 1779 return ret_val;
1764 1780
1765 /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI 1781 /* Clear Auto-Crossover to force MDI manually. M88E1000 requires
1766 * forced whenever speed are duplex are forced. 1782 * MDI forced whenever speed are duplex are forced.
1767 */ 1783 */
1768 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; 1784 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
1769 ret_val = 1785 ret_val =
@@ -1814,10 +1830,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1814 e_dbg("Waiting for forced speed/duplex link.\n"); 1830 e_dbg("Waiting for forced speed/duplex link.\n");
1815 mii_status_reg = 0; 1831 mii_status_reg = 0;
1816 1832
1817 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ 1833 /* Wait for autoneg to complete or 4.5 seconds to expire */
1818 for (i = PHY_FORCE_TIME; i > 0; i--) { 1834 for (i = PHY_FORCE_TIME; i > 0; i--) {
1819 /* Read the MII Status Register and wait for Auto-Neg Complete bit 1835 /* Read the MII Status Register and wait for Auto-Neg
1820 * to be set. 1836 * Complete bit to be set.
1821 */ 1837 */
1822 ret_val = 1838 ret_val =
1823 e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 1839 e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
@@ -1834,20 +1850,24 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1834 msleep(100); 1850 msleep(100);
1835 } 1851 }
1836 if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { 1852 if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
1837 /* We didn't get link. Reset the DSP and wait again for link. */ 1853 /* We didn't get link. Reset the DSP and wait again
1854 * for link.
1855 */
1838 ret_val = e1000_phy_reset_dsp(hw); 1856 ret_val = e1000_phy_reset_dsp(hw);
1839 if (ret_val) { 1857 if (ret_val) {
1840 e_dbg("Error Resetting PHY DSP\n"); 1858 e_dbg("Error Resetting PHY DSP\n");
1841 return ret_val; 1859 return ret_val;
1842 } 1860 }
1843 } 1861 }
1844 /* This loop will early-out if the link condition has been met. */ 1862 /* This loop will early-out if the link condition has been
1863 * met
1864 */
1845 for (i = PHY_FORCE_TIME; i > 0; i--) { 1865 for (i = PHY_FORCE_TIME; i > 0; i--) {
1846 if (mii_status_reg & MII_SR_LINK_STATUS) 1866 if (mii_status_reg & MII_SR_LINK_STATUS)
1847 break; 1867 break;
1848 msleep(100); 1868 msleep(100);
1849 /* Read the MII Status Register and wait for Auto-Neg Complete bit 1869 /* Read the MII Status Register and wait for Auto-Neg
1850 * to be set. 1870 * Complete bit to be set.
1851 */ 1871 */
1852 ret_val = 1872 ret_val =
1853 e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); 1873 e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
@@ -1862,9 +1882,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1862 } 1882 }
1863 1883
1864 if (hw->phy_type == e1000_phy_m88) { 1884 if (hw->phy_type == e1000_phy_m88) {
1865 /* Because we reset the PHY above, we need to re-force TX_CLK in the 1885 /* Because we reset the PHY above, we need to re-force TX_CLK in
1866 * Extended PHY Specific Control Register to 25MHz clock. This value 1886 * the Extended PHY Specific Control Register to 25MHz clock.
1867 * defaults back to a 2.5MHz clock when the PHY is reset. 1887 * This value defaults back to a 2.5MHz clock when the PHY is
1888 * reset.
1868 */ 1889 */
1869 ret_val = 1890 ret_val =
1870 e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 1891 e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
@@ -1879,8 +1900,9 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1879 if (ret_val) 1900 if (ret_val)
1880 return ret_val; 1901 return ret_val;
1881 1902
1882 /* In addition, because of the s/w reset above, we need to enable CRS on 1903 /* In addition, because of the s/w reset above, we need to
1883 * TX. This must be set for both full and half duplex operation. 1904 * enable CRS on Tx. This must be set for both full and half
1905 * duplex operation.
1884 */ 1906 */
1885 ret_val = 1907 ret_val =
1886 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1908 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1951,7 +1973,8 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
1951 e_dbg("e1000_config_mac_to_phy"); 1973 e_dbg("e1000_config_mac_to_phy");
1952 1974
1953 /* 82544 or newer MAC, Auto Speed Detection takes care of 1975 /* 82544 or newer MAC, Auto Speed Detection takes care of
1954 * MAC speed/duplex configuration.*/ 1976 * MAC speed/duplex configuration.
1977 */
1955 if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) 1978 if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
1956 return E1000_SUCCESS; 1979 return E1000_SUCCESS;
1957 1980
@@ -1985,7 +2008,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
1985 * registers depending on negotiated values. 2008 * registers depending on negotiated values.
1986 */ 2009 */
1987 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 2010 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
1988 &phy_data); 2011 &phy_data);
1989 if (ret_val) 2012 if (ret_val)
1990 return ret_val; 2013 return ret_val;
1991 2014
@@ -2002,7 +2025,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
2002 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) 2025 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
2003 ctrl |= E1000_CTRL_SPD_1000; 2026 ctrl |= E1000_CTRL_SPD_1000;
2004 else if ((phy_data & M88E1000_PSSR_SPEED) == 2027 else if ((phy_data & M88E1000_PSSR_SPEED) ==
2005 M88E1000_PSSR_100MBS) 2028 M88E1000_PSSR_100MBS)
2006 ctrl |= E1000_CTRL_SPD_100; 2029 ctrl |= E1000_CTRL_SPD_100;
2007 } 2030 }
2008 2031
@@ -2135,9 +2158,9 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2135 if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { 2158 if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
2136 /* The AutoNeg process has completed, so we now need to 2159 /* The AutoNeg process has completed, so we now need to
2137 * read both the Auto Negotiation Advertisement Register 2160 * read both the Auto Negotiation Advertisement Register
2138 * (Address 4) and the Auto_Negotiation Base Page Ability 2161 * (Address 4) and the Auto_Negotiation Base Page
2139 * Register (Address 5) to determine how flow control was 2162 * Ability Register (Address 5) to determine how flow
2140 * negotiated. 2163 * control was negotiated.
2141 */ 2164 */
2142 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, 2165 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
2143 &mii_nway_adv_reg); 2166 &mii_nway_adv_reg);
@@ -2148,18 +2171,19 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2148 if (ret_val) 2171 if (ret_val)
2149 return ret_val; 2172 return ret_val;
2150 2173
2151 /* Two bits in the Auto Negotiation Advertisement Register 2174 /* Two bits in the Auto Negotiation Advertisement
2152 * (Address 4) and two bits in the Auto Negotiation Base 2175 * Register (Address 4) and two bits in the Auto
2153 * Page Ability Register (Address 5) determine flow control 2176 * Negotiation Base Page Ability Register (Address 5)
2154 * for both the PHY and the link partner. The following 2177 * determine flow control for both the PHY and the link
2155 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, 2178 * partner. The following table, taken out of the IEEE
2156 * 1999, describes these PAUSE resolution bits and how flow 2179 * 802.3ab/D6.0 dated March 25, 1999, describes these
2157 * control is determined based upon these settings. 2180 * PAUSE resolution bits and how flow control is
2181 * determined based upon these settings.
2158 * NOTE: DC = Don't Care 2182 * NOTE: DC = Don't Care
2159 * 2183 *
2160 * LOCAL DEVICE | LINK PARTNER 2184 * LOCAL DEVICE | LINK PARTNER
2161 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution 2185 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
2162 *-------|---------|-------|---------|-------------------- 2186 *-------|---------|-------|---------|------------------
2163 * 0 | 0 | DC | DC | E1000_FC_NONE 2187 * 0 | 0 | DC | DC | E1000_FC_NONE
2164 * 0 | 1 | 0 | DC | E1000_FC_NONE 2188 * 0 | 1 | 0 | DC | E1000_FC_NONE
2165 * 0 | 1 | 1 | 0 | E1000_FC_NONE 2189 * 0 | 1 | 1 | 0 | E1000_FC_NONE
@@ -2178,17 +2202,18 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2178 * 2202 *
2179 * LOCAL DEVICE | LINK PARTNER 2203 * LOCAL DEVICE | LINK PARTNER
2180 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 2204 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
2181 *-------|---------|-------|---------|-------------------- 2205 *-------|---------|-------|---------|------------------
2182 * 1 | DC | 1 | DC | E1000_FC_FULL 2206 * 1 | DC | 1 | DC | E1000_FC_FULL
2183 * 2207 *
2184 */ 2208 */
2185 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && 2209 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
2186 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { 2210 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
2187 /* Now we need to check if the user selected RX ONLY 2211 /* Now we need to check if the user selected Rx
2188 * of pause frames. In this case, we had to advertise 2212 * ONLY of pause frames. In this case, we had
2189 * FULL flow control because we could not advertise RX 2213 * to advertise FULL flow control because we
2190 * ONLY. Hence, we must now check to see if we need to 2214 * could not advertise Rx ONLY. Hence, we must
2191 * turn OFF the TRANSMISSION of PAUSE frames. 2215 * now check to see if we need to turn OFF the
2216 * TRANSMISSION of PAUSE frames.
2192 */ 2217 */
2193 if (hw->original_fc == E1000_FC_FULL) { 2218 if (hw->original_fc == E1000_FC_FULL) {
2194 hw->fc = E1000_FC_FULL; 2219 hw->fc = E1000_FC_FULL;
@@ -2203,7 +2228,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2203 * 2228 *
2204 * LOCAL DEVICE | LINK PARTNER 2229 * LOCAL DEVICE | LINK PARTNER
2205 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 2230 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
2206 *-------|---------|-------|---------|-------------------- 2231 *-------|---------|-------|---------|------------------
2207 * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE 2232 * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
2208 * 2233 *
2209 */ 2234 */
@@ -2220,7 +2245,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2220 * 2245 *
2221 * LOCAL DEVICE | LINK PARTNER 2246 * LOCAL DEVICE | LINK PARTNER
2222 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 2247 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
2223 *-------|---------|-------|---------|-------------------- 2248 *-------|---------|-------|---------|------------------
2224 * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE 2249 * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
2225 * 2250 *
2226 */ 2251 */
@@ -2233,25 +2258,27 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2233 e_dbg 2258 e_dbg
2234 ("Flow Control = RX PAUSE frames only.\n"); 2259 ("Flow Control = RX PAUSE frames only.\n");
2235 } 2260 }
2236 /* Per the IEEE spec, at this point flow control should be 2261 /* Per the IEEE spec, at this point flow control should
2237 * disabled. However, we want to consider that we could 2262 * be disabled. However, we want to consider that we
2238 * be connected to a legacy switch that doesn't advertise 2263 * could be connected to a legacy switch that doesn't
2239 * desired flow control, but can be forced on the link 2264 * advertise desired flow control, but can be forced on
2240 * partner. So if we advertised no flow control, that is 2265 * the link partner. So if we advertised no flow
2241 * what we will resolve to. If we advertised some kind of 2266 * control, that is what we will resolve to. If we
2242 * receive capability (Rx Pause Only or Full Flow Control) 2267 * advertised some kind of receive capability (Rx Pause
2243 * and the link partner advertised none, we will configure 2268 * Only or Full Flow Control) and the link partner
2244 * ourselves to enable Rx Flow Control only. We can do 2269 * advertised none, we will configure ourselves to
2245 * this safely for two reasons: If the link partner really 2270 * enable Rx Flow Control only. We can do this safely
2246 * didn't want flow control enabled, and we enable Rx, no 2271 * for two reasons: If the link partner really
2247 * harm done since we won't be receiving any PAUSE frames 2272 * didn't want flow control enabled, and we enable Rx,
2248 * anyway. If the intent on the link partner was to have 2273 * no harm done since we won't be receiving any PAUSE
2249 * flow control enabled, then by us enabling RX only, we 2274 * frames anyway. If the intent on the link partner was
2250 * can at least receive pause frames and process them. 2275 * to have flow control enabled, then by us enabling Rx
2251 * This is a good idea because in most cases, since we are 2276 * only, we can at least receive pause frames and
2252 * predominantly a server NIC, more times than not we will 2277 * process them. This is a good idea because in most
2253 * be asked to delay transmission of packets than asking 2278 * cases, since we are predominantly a server NIC, more
2254 * our link partner to pause transmission of frames. 2279 * times than not we will be asked to delay transmission
2280 * of packets than asking our link partner to pause
2281 * transmission of frames.
2255 */ 2282 */
2256 else if ((hw->original_fc == E1000_FC_NONE || 2283 else if ((hw->original_fc == E1000_FC_NONE ||
2257 hw->original_fc == E1000_FC_TX_PAUSE) || 2284 hw->original_fc == E1000_FC_TX_PAUSE) ||
@@ -2316,8 +2343,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2316 status = er32(STATUS); 2343 status = er32(STATUS);
2317 rxcw = er32(RXCW); 2344 rxcw = er32(RXCW);
2318 2345
2319 /* 2346 /* If we don't have link (auto-negotiation failed or link partner
2320 * If we don't have link (auto-negotiation failed or link partner
2321 * cannot auto-negotiate), and our link partner is not trying to 2347 * cannot auto-negotiate), and our link partner is not trying to
2322 * auto-negotiate with us (we are receiving idles or data), 2348 * auto-negotiate with us (we are receiving idles or data),
2323 * we need to force link up. We also need to give auto-negotiation 2349 * we need to force link up. We also need to give auto-negotiation
@@ -2346,8 +2372,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2346 goto out; 2372 goto out;
2347 } 2373 }
2348 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 2374 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
2349 /* 2375 /* If we are forcing link and we are receiving /C/ ordered
2350 * If we are forcing link and we are receiving /C/ ordered
2351 * sets, re-enable auto-negotiation in the TXCW register 2376 * sets, re-enable auto-negotiation in the TXCW register
2352 * and disable forced link in the Device Control register 2377 * and disable forced link in the Device Control register
2353 * in an attempt to auto-negotiate with our link partner. 2378 * in an attempt to auto-negotiate with our link partner.
@@ -2358,8 +2383,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2358 2383
2359 hw->serdes_has_link = true; 2384 hw->serdes_has_link = true;
2360 } else if (!(E1000_TXCW_ANE & er32(TXCW))) { 2385 } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
2361 /* 2386 /* If we force link for non-auto-negotiation switch, check
2362 * If we force link for non-auto-negotiation switch, check
2363 * link status based on MAC synchronization for internal 2387 * link status based on MAC synchronization for internal
2364 * serdes media type. 2388 * serdes media type.
2365 */ 2389 */
@@ -2468,15 +2492,17 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2468 2492
2469 if (phy_data & MII_SR_LINK_STATUS) { 2493 if (phy_data & MII_SR_LINK_STATUS) {
2470 hw->get_link_status = false; 2494 hw->get_link_status = false;
2471 /* Check if there was DownShift, must be checked immediately after 2495 /* Check if there was DownShift, must be checked
2472 * link-up */ 2496 * immediately after link-up
2497 */
2473 e1000_check_downshift(hw); 2498 e1000_check_downshift(hw);
2474 2499
2475 /* If we are on 82544 or 82543 silicon and speed/duplex 2500 /* If we are on 82544 or 82543 silicon and speed/duplex
2476 * are forced to 10H or 10F, then we will implement the polarity 2501 * are forced to 10H or 10F, then we will implement the
2477 * reversal workaround. We disable interrupts first, and upon 2502 * polarity reversal workaround. We disable interrupts
2478 * returning, place the devices interrupt state to its previous 2503 * first, and upon returning, place the devices
2479 * value except for the link status change interrupt which will 2504 * interrupt state to its previous value except for the
2505 * link status change interrupt which will
2480 * happen due to the execution of this workaround. 2506 * happen due to the execution of this workaround.
2481 */ 2507 */
2482 2508
@@ -2527,9 +2553,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2527 } 2553 }
2528 } 2554 }
2529 2555
2530 /* Configure Flow Control now that Auto-Neg has completed. First, we 2556 /* Configure Flow Control now that Auto-Neg has completed.
2531 * need to restore the desired flow control settings because we may 2557 * First, we need to restore the desired flow control settings
2532 * have had to re-autoneg with a different link partner. 2558 * because we may have had to re-autoneg with a different link
2559 * partner.
2533 */ 2560 */
2534 ret_val = e1000_config_fc_after_link_up(hw); 2561 ret_val = e1000_config_fc_after_link_up(hw);
2535 if (ret_val) { 2562 if (ret_val) {
@@ -2538,11 +2565,12 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2538 } 2565 }
2539 2566
2540 /* At this point we know that we are on copper and we have 2567 /* At this point we know that we are on copper and we have
2541 * auto-negotiated link. These are conditions for checking the link 2568 * auto-negotiated link. These are conditions for checking the
2542 * partner capability register. We use the link speed to determine if 2569 * link partner capability register. We use the link speed to
2543 * TBI compatibility needs to be turned on or off. If the link is not 2570 * determine if TBI compatibility needs to be turned on or off.
2544 * at gigabit speed, then TBI compatibility is not needed. If we are 2571 * If the link is not at gigabit speed, then TBI compatibility
2545 * at gigabit speed, we turn on TBI compatibility. 2572 * is not needed. If we are at gigabit speed, we turn on TBI
2573 * compatibility.
2546 */ 2574 */
2547 if (hw->tbi_compatibility_en) { 2575 if (hw->tbi_compatibility_en) {
2548 u16 speed, duplex; 2576 u16 speed, duplex;
@@ -2554,20 +2582,23 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2554 return ret_val; 2582 return ret_val;
2555 } 2583 }
2556 if (speed != SPEED_1000) { 2584 if (speed != SPEED_1000) {
2557 /* If link speed is not set to gigabit speed, we do not need 2585 /* If link speed is not set to gigabit speed, we
2558 * to enable TBI compatibility. 2586 * do not need to enable TBI compatibility.
2559 */ 2587 */
2560 if (hw->tbi_compatibility_on) { 2588 if (hw->tbi_compatibility_on) {
2561 /* If we previously were in the mode, turn it off. */ 2589 /* If we previously were in the mode,
2590 * turn it off.
2591 */
2562 rctl = er32(RCTL); 2592 rctl = er32(RCTL);
2563 rctl &= ~E1000_RCTL_SBP; 2593 rctl &= ~E1000_RCTL_SBP;
2564 ew32(RCTL, rctl); 2594 ew32(RCTL, rctl);
2565 hw->tbi_compatibility_on = false; 2595 hw->tbi_compatibility_on = false;
2566 } 2596 }
2567 } else { 2597 } else {
2568 /* If TBI compatibility is was previously off, turn it on. For 2598 /* If TBI compatibility is was previously off,
2569 * compatibility with a TBI link partner, we will store bad 2599 * turn it on. For compatibility with a TBI link
2570 * packets. Some frames have an additional byte on the end and 2600 * partner, we will store bad packets. Some
2601 * frames have an additional byte on the end and
2571 * will look like CRC errors to to the hardware. 2602 * will look like CRC errors to to the hardware.
2572 */ 2603 */
2573 if (!hw->tbi_compatibility_on) { 2604 if (!hw->tbi_compatibility_on) {
@@ -2629,9 +2660,9 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
2629 *duplex = FULL_DUPLEX; 2660 *duplex = FULL_DUPLEX;
2630 } 2661 }
2631 2662
2632 /* IGP01 PHY may advertise full duplex operation after speed downgrade even 2663 /* IGP01 PHY may advertise full duplex operation after speed downgrade
2633 * if it is operating at half duplex. Here we set the duplex settings to 2664 * even if it is operating at half duplex. Here we set the duplex
2634 * match the duplex in the link partner's capabilities. 2665 * settings to match the duplex in the link partner's capabilities.
2635 */ 2666 */
2636 if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { 2667 if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
2637 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); 2668 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
@@ -2697,8 +2728,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
2697 */ 2728 */
2698static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) 2729static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
2699{ 2730{
2700 /* Raise the clock input to the Management Data Clock (by setting the MDC 2731 /* Raise the clock input to the Management Data Clock (by setting the
2701 * bit), and then delay 10 microseconds. 2732 * MDC bit), and then delay 10 microseconds.
2702 */ 2733 */
2703 ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); 2734 ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
2704 E1000_WRITE_FLUSH(); 2735 E1000_WRITE_FLUSH();
@@ -2712,8 +2743,8 @@ static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
2712 */ 2743 */
2713static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) 2744static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
2714{ 2745{
2715 /* Lower the clock input to the Management Data Clock (by clearing the MDC 2746 /* Lower the clock input to the Management Data Clock (by clearing the
2716 * bit), and then delay 10 microseconds. 2747 * MDC bit), and then delay 10 microseconds.
2717 */ 2748 */
2718 ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); 2749 ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
2719 E1000_WRITE_FLUSH(); 2750 E1000_WRITE_FLUSH();
@@ -2746,10 +2777,10 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
2746 ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); 2777 ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
2747 2778
2748 while (mask) { 2779 while (mask) {
2749 /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and 2780 /* A "1" is shifted out to the PHY by setting the MDIO bit to
2750 * then raising and lowering the Management Data Clock. A "0" is 2781 * "1" and then raising and lowering the Management Data Clock.
2751 * shifted out to the PHY by setting the MDIO bit to "0" and then 2782 * A "0" is shifted out to the PHY by setting the MDIO bit to
2752 * raising and lowering the clock. 2783 * "0" and then raising and lowering the clock.
2753 */ 2784 */
2754 if (data & mask) 2785 if (data & mask)
2755 ctrl |= E1000_CTRL_MDIO; 2786 ctrl |= E1000_CTRL_MDIO;
@@ -2781,24 +2812,26 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
2781 u8 i; 2812 u8 i;
2782 2813
2783 /* In order to read a register from the PHY, we need to shift in a total 2814 /* In order to read a register from the PHY, we need to shift in a total
2784 * of 18 bits from the PHY. The first two bit (turnaround) times are used 2815 * of 18 bits from the PHY. The first two bit (turnaround) times are
2785 * to avoid contention on the MDIO pin when a read operation is performed. 2816 * used to avoid contention on the MDIO pin when a read operation is
2786 * These two bits are ignored by us and thrown away. Bits are "shifted in" 2817 * performed. These two bits are ignored by us and thrown away. Bits are
2787 * by raising the input to the Management Data Clock (setting the MDC bit), 2818 * "shifted in" by raising the input to the Management Data Clock
2788 * and then reading the value of the MDIO bit. 2819 * (setting the MDC bit), and then reading the value of the MDIO bit.
2789 */ 2820 */
2790 ctrl = er32(CTRL); 2821 ctrl = er32(CTRL);
2791 2822
2792 /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */ 2823 /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as
2824 * input.
2825 */
2793 ctrl &= ~E1000_CTRL_MDIO_DIR; 2826 ctrl &= ~E1000_CTRL_MDIO_DIR;
2794 ctrl &= ~E1000_CTRL_MDIO; 2827 ctrl &= ~E1000_CTRL_MDIO;
2795 2828
2796 ew32(CTRL, ctrl); 2829 ew32(CTRL, ctrl);
2797 E1000_WRITE_FLUSH(); 2830 E1000_WRITE_FLUSH();
2798 2831
2799 /* Raise and Lower the clock before reading in the data. This accounts for 2832 /* Raise and Lower the clock before reading in the data. This accounts
2800 * the turnaround bits. The first clock occurred when we clocked out the 2833 * for the turnaround bits. The first clock occurred when we clocked out
2801 * last bit of the Register Address. 2834 * the last bit of the Register Address.
2802 */ 2835 */
2803 e1000_raise_mdi_clk(hw, &ctrl); 2836 e1000_raise_mdi_clk(hw, &ctrl);
2804 e1000_lower_mdi_clk(hw, &ctrl); 2837 e1000_lower_mdi_clk(hw, &ctrl);
@@ -2870,8 +2903,8 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2870 2903
2871 if (hw->mac_type > e1000_82543) { 2904 if (hw->mac_type > e1000_82543) {
2872 /* Set up Op-code, Phy Address, and register address in the MDI 2905 /* Set up Op-code, Phy Address, and register address in the MDI
2873 * Control register. The MAC will take care of interfacing with the 2906 * Control register. The MAC will take care of interfacing with
2874 * PHY to retrieve the desired data. 2907 * the PHY to retrieve the desired data.
2875 */ 2908 */
2876 if (hw->mac_type == e1000_ce4100) { 2909 if (hw->mac_type == e1000_ce4100) {
2877 mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | 2910 mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
@@ -2929,31 +2962,32 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2929 *phy_data = (u16) mdic; 2962 *phy_data = (u16) mdic;
2930 } 2963 }
2931 } else { 2964 } else {
2932 /* We must first send a preamble through the MDIO pin to signal the 2965 /* We must first send a preamble through the MDIO pin to signal
2933 * beginning of an MII instruction. This is done by sending 32 2966 * the beginning of an MII instruction. This is done by sending
2934 * consecutive "1" bits. 2967 * 32 consecutive "1" bits.
2935 */ 2968 */
2936 e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); 2969 e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
2937 2970
2938 /* Now combine the next few fields that are required for a read 2971 /* Now combine the next few fields that are required for a read
2939 * operation. We use this method instead of calling the 2972 * operation. We use this method instead of calling the
2940 * e1000_shift_out_mdi_bits routine five different times. The format of 2973 * e1000_shift_out_mdi_bits routine five different times. The
2941 * a MII read instruction consists of a shift out of 14 bits and is 2974 * format of a MII read instruction consists of a shift out of
2942 * defined as follows: 2975 * 14 bits and is defined as follows:
2943 * <Preamble><SOF><Op Code><Phy Addr><Reg Addr> 2976 * <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
2944 * followed by a shift in of 18 bits. This first two bits shifted in 2977 * followed by a shift in of 18 bits. This first two bits
2945 * are TurnAround bits used to avoid contention on the MDIO pin when a 2978 * shifted in are TurnAround bits used to avoid contention on
2946 * READ operation is performed. These two bits are thrown away 2979 * the MDIO pin when a READ operation is performed. These two
2947 * followed by a shift in of 16 bits which contains the desired data. 2980 * bits are thrown away followed by a shift in of 16 bits which
2981 * contains the desired data.
2948 */ 2982 */
2949 mdic = ((reg_addr) | (phy_addr << 5) | 2983 mdic = ((reg_addr) | (phy_addr << 5) |
2950 (PHY_OP_READ << 10) | (PHY_SOF << 12)); 2984 (PHY_OP_READ << 10) | (PHY_SOF << 12));
2951 2985
2952 e1000_shift_out_mdi_bits(hw, mdic, 14); 2986 e1000_shift_out_mdi_bits(hw, mdic, 14);
2953 2987
2954 /* Now that we've shifted out the read command to the MII, we need to 2988 /* Now that we've shifted out the read command to the MII, we
2955 * "shift in" the 16-bit value (18 total bits) of the requested PHY 2989 * need to "shift in" the 16-bit value (18 total bits) of the
2956 * register address. 2990 * requested PHY register address.
2957 */ 2991 */
2958 *phy_data = e1000_shift_in_mdi_bits(hw); 2992 *phy_data = e1000_shift_in_mdi_bits(hw);
2959 } 2993 }
@@ -3060,18 +3094,18 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
3060 } 3094 }
3061 } 3095 }
3062 } else { 3096 } else {
3063 /* We'll need to use the SW defined pins to shift the write command 3097 /* We'll need to use the SW defined pins to shift the write
3064 * out to the PHY. We first send a preamble to the PHY to signal the 3098 * command out to the PHY. We first send a preamble to the PHY
3065 * beginning of the MII instruction. This is done by sending 32 3099 * to signal the beginning of the MII instruction. This is done
3066 * consecutive "1" bits. 3100 * by sending 32 consecutive "1" bits.
3067 */ 3101 */
3068 e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); 3102 e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
3069 3103
3070 /* Now combine the remaining required fields that will indicate a 3104 /* Now combine the remaining required fields that will indicate
3071 * write operation. We use this method instead of calling the 3105 * a write operation. We use this method instead of calling the
3072 * e1000_shift_out_mdi_bits routine for each field in the command. The 3106 * e1000_shift_out_mdi_bits routine for each field in the
3073 * format of a MII write instruction is as follows: 3107 * command. The format of a MII write instruction is as follows:
3074 * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>. 3108 * <Preamble><SOF><OpCode><PhyAddr><RegAddr><Turnaround><Data>.
3075 */ 3109 */
3076 mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | 3110 mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
3077 (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); 3111 (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
@@ -3100,10 +3134,10 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
3100 e_dbg("Resetting Phy...\n"); 3134 e_dbg("Resetting Phy...\n");
3101 3135
3102 if (hw->mac_type > e1000_82543) { 3136 if (hw->mac_type > e1000_82543) {
3103 /* Read the device control register and assert the E1000_CTRL_PHY_RST 3137 /* Read the device control register and assert the
3104 * bit. Then, take it out of reset. 3138 * E1000_CTRL_PHY_RST bit. Then, take it out of reset.
3105 * For e1000 hardware, we delay for 10ms between the assert 3139 * For e1000 hardware, we delay for 10ms between the assert
3106 * and deassert. 3140 * and de-assert.
3107 */ 3141 */
3108 ctrl = er32(CTRL); 3142 ctrl = er32(CTRL);
3109 ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); 3143 ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
@@ -3115,8 +3149,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
3115 E1000_WRITE_FLUSH(); 3149 E1000_WRITE_FLUSH();
3116 3150
3117 } else { 3151 } else {
3118 /* Read the Extended Device Control Register, assert the PHY_RESET_DIR 3152 /* Read the Extended Device Control Register, assert the
3119 * bit to put the PHY into reset. Then, take it out of reset. 3153 * PHY_RESET_DIR bit to put the PHY into reset. Then, take it
3154 * out of reset.
3120 */ 3155 */
3121 ctrl_ext = er32(CTRL_EXT); 3156 ctrl_ext = er32(CTRL_EXT);
3122 ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; 3157 ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
@@ -3301,7 +3336,8 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
3301 e_dbg("e1000_phy_igp_get_info"); 3336 e_dbg("e1000_phy_igp_get_info");
3302 3337
3303 /* The downshift status is checked only once, after link is established, 3338 /* The downshift status is checked only once, after link is established,
3304 * and it stored in the hw->speed_downgraded parameter. */ 3339 * and it stored in the hw->speed_downgraded parameter.
3340 */
3305 phy_info->downshift = (e1000_downshift) hw->speed_downgraded; 3341 phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
3306 3342
3307 /* IGP01E1000 does not need to support it. */ 3343 /* IGP01E1000 does not need to support it. */
@@ -3327,7 +3363,9 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
3327 3363
3328 if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == 3364 if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
3329 IGP01E1000_PSSR_SPEED_1000MBPS) { 3365 IGP01E1000_PSSR_SPEED_1000MBPS) {
3330 /* Local/Remote Receiver Information are only valid at 1000 Mbps */ 3366 /* Local/Remote Receiver Information are only valid @ 1000
3367 * Mbps
3368 */
3331 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); 3369 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
3332 if (ret_val) 3370 if (ret_val)
3333 return ret_val; 3371 return ret_val;
@@ -3379,7 +3417,8 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
3379 e_dbg("e1000_phy_m88_get_info"); 3417 e_dbg("e1000_phy_m88_get_info");
3380 3418
3381 /* The downshift status is checked only once, after link is established, 3419 /* The downshift status is checked only once, after link is established,
3382 * and it stored in the hw->speed_downgraded parameter. */ 3420 * and it stored in the hw->speed_downgraded parameter.
3421 */
3383 phy_info->downshift = (e1000_downshift) hw->speed_downgraded; 3422 phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
3384 3423
3385 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 3424 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -3574,8 +3613,8 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
3574 } 3613 }
3575 3614
3576 if (eeprom->type == e1000_eeprom_spi) { 3615 if (eeprom->type == e1000_eeprom_spi) {
3577 /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to 3616 /* eeprom_size will be an enum [0..8] that maps to eeprom sizes
3578 * 32KB (incremented by powers of 2). 3617 * 128B to 32KB (incremented by powers of 2).
3579 */ 3618 */
3580 /* Set to default value for initial eeprom read. */ 3619 /* Set to default value for initial eeprom read. */
3581 eeprom->word_size = 64; 3620 eeprom->word_size = 64;
@@ -3585,8 +3624,9 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
3585 eeprom_size = 3624 eeprom_size =
3586 (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; 3625 (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
3587 /* 256B eeprom size was not supported in earlier hardware, so we 3626 /* 256B eeprom size was not supported in earlier hardware, so we
3588 * bump eeprom_size up one to ensure that "1" (which maps to 256B) 3627 * bump eeprom_size up one to ensure that "1" (which maps to
3589 * is never the result used in the shifting logic below. */ 3628 * 256B) is never the result used in the shifting logic below.
3629 */
3590 if (eeprom_size) 3630 if (eeprom_size)
3591 eeprom_size++; 3631 eeprom_size++;
3592 3632
@@ -3618,8 +3658,8 @@ static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
3618 */ 3658 */
3619static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) 3659static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
3620{ 3660{
3621 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then 3661 /* Lower the clock input to the EEPROM (by clearing the SK bit), and
3622 * wait 50 microseconds. 3662 * then wait 50 microseconds.
3623 */ 3663 */
3624 *eecd = *eecd & ~E1000_EECD_SK; 3664 *eecd = *eecd & ~E1000_EECD_SK;
3625 ew32(EECD, *eecd); 3665 ew32(EECD, *eecd);
@@ -3651,10 +3691,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
3651 eecd |= E1000_EECD_DO; 3691 eecd |= E1000_EECD_DO;
3652 } 3692 }
3653 do { 3693 do {
3654 /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", 3694 /* A "1" is shifted out to the EEPROM by setting bit "DI" to a
3655 * and then raising and then lowering the clock (the SK bit controls 3695 * "1", and then raising and then lowering the clock (the SK bit
3656 * the clock input to the EEPROM). A "0" is shifted out to the EEPROM 3696 * controls the clock input to the EEPROM). A "0" is shifted
3657 * by setting "DI" to "0" and then raising and then lowering the clock. 3697 * out to the EEPROM by setting "DI" to "0" and then raising and
3698 * then lowering the clock.
3658 */ 3699 */
3659 eecd &= ~E1000_EECD_DI; 3700 eecd &= ~E1000_EECD_DI;
3660 3701
@@ -3691,9 +3732,9 @@ static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
3691 3732
3692 /* In order to read a register from the EEPROM, we need to shift 'count' 3733 /* In order to read a register from the EEPROM, we need to shift 'count'
3693 * bits in from the EEPROM. Bits are "shifted in" by raising the clock 3734 * bits in from the EEPROM. Bits are "shifted in" by raising the clock
3694 * input to the EEPROM (setting the SK bit), and then reading the value of 3735 * input to the EEPROM (setting the SK bit), and then reading the value
3695 * the "DO" bit. During this "shifting in" process the "DI" bit should 3736 * of the "DO" bit. During this "shifting in" process the "DI" bit
3696 * always be clear. 3737 * should always be clear.
3697 */ 3738 */
3698 3739
3699 eecd = er32(EECD); 3740 eecd = er32(EECD);
@@ -3945,8 +3986,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3945 if (eeprom->word_size == 0) 3986 if (eeprom->word_size == 0)
3946 e1000_init_eeprom_params(hw); 3987 e1000_init_eeprom_params(hw);
3947 3988
3948 /* A check for invalid values: offset too large, too many words, and not 3989 /* A check for invalid values: offset too large, too many words, and
3949 * enough words. 3990 * not enough words.
3950 */ 3991 */
3951 if ((offset >= eeprom->word_size) 3992 if ((offset >= eeprom->word_size)
3952 || (words > eeprom->word_size - offset) || (words == 0)) { 3993 || (words > eeprom->word_size - offset) || (words == 0)) {
@@ -3964,7 +4005,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3964 return -E1000_ERR_EEPROM; 4005 return -E1000_ERR_EEPROM;
3965 4006
3966 /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have 4007 /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
3967 * acquired the EEPROM at this point, so any returns should release it */ 4008 * acquired the EEPROM at this point, so any returns should release it
4009 */
3968 if (eeprom->type == e1000_eeprom_spi) { 4010 if (eeprom->type == e1000_eeprom_spi) {
3969 u16 word_in; 4011 u16 word_in;
3970 u8 read_opcode = EEPROM_READ_OPCODE_SPI; 4012 u8 read_opcode = EEPROM_READ_OPCODE_SPI;
@@ -3976,7 +4018,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3976 4018
3977 e1000_standby_eeprom(hw); 4019 e1000_standby_eeprom(hw);
3978 4020
3979 /* Some SPI eeproms use the 8th address bit embedded in the opcode */ 4021 /* Some SPI eeproms use the 8th address bit embedded in the
4022 * opcode
4023 */
3980 if ((eeprom->address_bits == 8) && (offset >= 128)) 4024 if ((eeprom->address_bits == 8) && (offset >= 128))
3981 read_opcode |= EEPROM_A8_OPCODE_SPI; 4025 read_opcode |= EEPROM_A8_OPCODE_SPI;
3982 4026
@@ -3985,11 +4029,13 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3985 e1000_shift_out_ee_bits(hw, (u16) (offset * 2), 4029 e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
3986 eeprom->address_bits); 4030 eeprom->address_bits);
3987 4031
3988 /* Read the data. The address of the eeprom internally increments with 4032 /* Read the data. The address of the eeprom internally
3989 * each byte (spi) being read, saving on the overhead of eeprom setup 4033 * increments with each byte (spi) being read, saving on the
3990 * and tear-down. The address counter will roll over if reading beyond 4034 * overhead of eeprom setup and tear-down. The address counter
3991 * the size of the eeprom, thus allowing the entire memory to be read 4035 * will roll over if reading beyond the size of the eeprom, thus
3992 * starting from any offset. */ 4036 * allowing the entire memory to be read starting from any
4037 * offset.
4038 */
3993 for (i = 0; i < words; i++) { 4039 for (i = 0; i < words; i++) {
3994 word_in = e1000_shift_in_ee_bits(hw, 16); 4040 word_in = e1000_shift_in_ee_bits(hw, 16);
3995 data[i] = (word_in >> 8) | (word_in << 8); 4041 data[i] = (word_in >> 8) | (word_in << 8);
@@ -4003,8 +4049,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
4003 e1000_shift_out_ee_bits(hw, (u16) (offset + i), 4049 e1000_shift_out_ee_bits(hw, (u16) (offset + i),
4004 eeprom->address_bits); 4050 eeprom->address_bits);
4005 4051
4006 /* Read the data. For microwire, each word requires the overhead 4052 /* Read the data. For microwire, each word requires the
4007 * of eeprom setup and tear-down. */ 4053 * overhead of eeprom setup and tear-down.
4054 */
4008 data[i] = e1000_shift_in_ee_bits(hw, 16); 4055 data[i] = e1000_shift_in_ee_bits(hw, 16);
4009 e1000_standby_eeprom(hw); 4056 e1000_standby_eeprom(hw);
4010 } 4057 }
@@ -4119,8 +4166,8 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
4119 if (eeprom->word_size == 0) 4166 if (eeprom->word_size == 0)
4120 e1000_init_eeprom_params(hw); 4167 e1000_init_eeprom_params(hw);
4121 4168
4122 /* A check for invalid values: offset too large, too many words, and not 4169 /* A check for invalid values: offset too large, too many words, and
4123 * enough words. 4170 * not enough words.
4124 */ 4171 */
4125 if ((offset >= eeprom->word_size) 4172 if ((offset >= eeprom->word_size)
4126 || (words > eeprom->word_size - offset) || (words == 0)) { 4173 || (words > eeprom->word_size - offset) || (words == 0)) {
@@ -4174,7 +4221,9 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
4174 4221
4175 e1000_standby_eeprom(hw); 4222 e1000_standby_eeprom(hw);
4176 4223
4177 /* Some SPI eeproms use the 8th address bit embedded in the opcode */ 4224 /* Some SPI eeproms use the 8th address bit embedded in the
4225 * opcode
4226 */
4178 if ((eeprom->address_bits == 8) && (offset >= 128)) 4227 if ((eeprom->address_bits == 8) && (offset >= 128))
4179 write_opcode |= EEPROM_A8_OPCODE_SPI; 4228 write_opcode |= EEPROM_A8_OPCODE_SPI;
4180 4229
@@ -4186,16 +4235,19 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
4186 4235
4187 /* Send the data */ 4236 /* Send the data */
4188 4237
4189 /* Loop to allow for up to whole page write (32 bytes) of eeprom */ 4238 /* Loop to allow for up to whole page write (32 bytes) of
4239 * eeprom
4240 */
4190 while (widx < words) { 4241 while (widx < words) {
4191 u16 word_out = data[widx]; 4242 u16 word_out = data[widx];
4192 word_out = (word_out >> 8) | (word_out << 8); 4243 word_out = (word_out >> 8) | (word_out << 8);
4193 e1000_shift_out_ee_bits(hw, word_out, 16); 4244 e1000_shift_out_ee_bits(hw, word_out, 16);
4194 widx++; 4245 widx++;
4195 4246
4196 /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE 4247 /* Some larger eeprom sizes are capable of a 32-byte
4197 * operation, while the smaller eeproms are capable of an 8-byte 4248 * PAGE WRITE operation, while the smaller eeproms are
4198 * PAGE WRITE operation. Break the inner loop to pass new address 4249 * capable of an 8-byte PAGE WRITE operation. Break the
4250 * inner loop to pass new address
4199 */ 4251 */
4200 if ((((offset + widx) * 2) % eeprom->page_size) == 0) { 4252 if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
4201 e1000_standby_eeprom(hw); 4253 e1000_standby_eeprom(hw);
@@ -4249,14 +4301,15 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
4249 /* Send the data */ 4301 /* Send the data */
4250 e1000_shift_out_ee_bits(hw, data[words_written], 16); 4302 e1000_shift_out_ee_bits(hw, data[words_written], 16);
4251 4303
4252 /* Toggle the CS line. This in effect tells the EEPROM to execute 4304 /* Toggle the CS line. This in effect tells the EEPROM to
4253 * the previous command. 4305 * execute the previous command.
4254 */ 4306 */
4255 e1000_standby_eeprom(hw); 4307 e1000_standby_eeprom(hw);
4256 4308
4257 /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will 4309 /* Read DO repeatedly until it is high (equal to '1'). The
4258 * signal that the command has been completed by raising the DO signal. 4310 * EEPROM will signal that the command has been completed by
4259 * If DO does not go high in 10 milliseconds, then error out. 4311 * raising the DO signal. If DO does not go high in 10
4312 * milliseconds, then error out.
4260 */ 4313 */
4261 for (i = 0; i < 200; i++) { 4314 for (i = 0; i < 200; i++) {
4262 eecd = er32(EECD); 4315 eecd = er32(EECD);
@@ -4483,7 +4536,8 @@ static void e1000_clear_vfta(struct e1000_hw *hw)
4483 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { 4536 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
4484 /* If the offset we want to clear is the same offset of the 4537 /* If the offset we want to clear is the same offset of the
4485 * manageability VLAN ID, then clear all bits except that of the 4538 * manageability VLAN ID, then clear all bits except that of the
4486 * manageability unit */ 4539 * manageability unit
4540 */
4487 vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; 4541 vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
4488 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); 4542 E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
4489 E1000_WRITE_FLUSH(); 4543 E1000_WRITE_FLUSH();
@@ -4911,12 +4965,12 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
4911 * counters overcount this packet as a CRC error and undercount 4965 * counters overcount this packet as a CRC error and undercount
4912 * the packet as a good packet 4966 * the packet as a good packet
4913 */ 4967 */
4914 /* This packet should not be counted as a CRC error. */ 4968 /* This packet should not be counted as a CRC error. */
4915 stats->crcerrs--; 4969 stats->crcerrs--;
4916 /* This packet does count as a Good Packet Received. */ 4970 /* This packet does count as a Good Packet Received. */
4917 stats->gprc++; 4971 stats->gprc++;
4918 4972
4919 /* Adjust the Good Octets received counters */ 4973 /* Adjust the Good Octets received counters */
4920 carry_bit = 0x80000000 & stats->gorcl; 4974 carry_bit = 0x80000000 & stats->gorcl;
4921 stats->gorcl += frame_len; 4975 stats->gorcl += frame_len;
4922 /* If the high bit of Gorcl (the low 32 bits of the Good Octets 4976 /* If the high bit of Gorcl (the low 32 bits of the Good Octets
@@ -5196,8 +5250,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
5196 if (ret_val) 5250 if (ret_val)
5197 return ret_val; 5251 return ret_val;
5198 5252
5199 /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to 5253 /* If speed is 1000 Mbps, must read the
5200 * find the polarity status */ 5254 * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status
5255 */
5201 if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == 5256 if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
5202 IGP01E1000_PSSR_SPEED_1000MBPS) { 5257 IGP01E1000_PSSR_SPEED_1000MBPS) {
5203 5258
@@ -5213,8 +5268,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
5213 e1000_rev_polarity_reversed : 5268 e1000_rev_polarity_reversed :
5214 e1000_rev_polarity_normal; 5269 e1000_rev_polarity_normal;
5215 } else { 5270 } else {
5216 /* For 10 Mbps, read the polarity bit in the status register. (for 5271 /* For 10 Mbps, read the polarity bit in the status
5217 * 100 Mbps this bit is always 0) */ 5272 * register. (for 100 Mbps this bit is always 0)
5273 */
5218 *polarity = 5274 *polarity =
5219 (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? 5275 (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
5220 e1000_rev_polarity_reversed : 5276 e1000_rev_polarity_reversed :
@@ -5374,8 +5430,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
5374 } 5430 }
5375 } else { 5431 } else {
5376 if (hw->dsp_config_state == e1000_dsp_config_activated) { 5432 if (hw->dsp_config_state == e1000_dsp_config_activated) {
5377 /* Save off the current value of register 0x2F5B to be restored at 5433 /* Save off the current value of register 0x2F5B to be
5378 * the end of the routines. */ 5434 * restored at the end of the routines.
5435 */
5379 ret_val = 5436 ret_val =
5380 e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); 5437 e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
5381 5438
@@ -5391,7 +5448,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
5391 msleep(20); 5448 msleep(20);
5392 5449
5393 ret_val = e1000_write_phy_reg(hw, 0x0000, 5450 ret_val = e1000_write_phy_reg(hw, 0x0000,
5394 IGP01E1000_IEEE_FORCE_GIGA); 5451 IGP01E1000_IEEE_FORCE_GIGA);
5395 if (ret_val) 5452 if (ret_val)
5396 return ret_val; 5453 return ret_val;
5397 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { 5454 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
@@ -5412,7 +5469,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
5412 } 5469 }
5413 5470
5414 ret_val = e1000_write_phy_reg(hw, 0x0000, 5471 ret_val = e1000_write_phy_reg(hw, 0x0000,
5415 IGP01E1000_IEEE_RESTART_AUTONEG); 5472 IGP01E1000_IEEE_RESTART_AUTONEG);
5416 if (ret_val) 5473 if (ret_val)
5417 return ret_val; 5474 return ret_val;
5418 5475
@@ -5429,8 +5486,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
5429 } 5486 }
5430 5487
5431 if (hw->ffe_config_state == e1000_ffe_config_active) { 5488 if (hw->ffe_config_state == e1000_ffe_config_active) {
5432 /* Save off the current value of register 0x2F5B to be restored at 5489 /* Save off the current value of register 0x2F5B to be
5433 * the end of the routines. */ 5490 * restored at the end of the routines.
5491 */
5434 ret_val = 5492 ret_val =
5435 e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); 5493 e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
5436 5494
@@ -5446,7 +5504,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
5446 msleep(20); 5504 msleep(20);
5447 5505
5448 ret_val = e1000_write_phy_reg(hw, 0x0000, 5506 ret_val = e1000_write_phy_reg(hw, 0x0000,
5449 IGP01E1000_IEEE_FORCE_GIGA); 5507 IGP01E1000_IEEE_FORCE_GIGA);
5450 if (ret_val) 5508 if (ret_val)
5451 return ret_val; 5509 return ret_val;
5452 ret_val = 5510 ret_val =
@@ -5456,7 +5514,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
5456 return ret_val; 5514 return ret_val;
5457 5515
5458 ret_val = e1000_write_phy_reg(hw, 0x0000, 5516 ret_val = e1000_write_phy_reg(hw, 0x0000,
5459 IGP01E1000_IEEE_RESTART_AUTONEG); 5517 IGP01E1000_IEEE_RESTART_AUTONEG);
5460 if (ret_val) 5518 if (ret_val)
5461 return ret_val; 5519 return ret_val;
5462 5520
@@ -5542,8 +5600,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
5542 return E1000_SUCCESS; 5600 return E1000_SUCCESS;
5543 5601
5544 /* During driver activity LPLU should not be used or it will attain link 5602 /* During driver activity LPLU should not be used or it will attain link
5545 * from the lowest speeds starting from 10Mbps. The capability is used for 5603 * from the lowest speeds starting from 10Mbps. The capability is used
5546 * Dx transitions and states */ 5604 * for Dx transitions and states
5605 */
5547 if (hw->mac_type == e1000_82541_rev_2 5606 if (hw->mac_type == e1000_82541_rev_2
5548 || hw->mac_type == e1000_82547_rev_2) { 5607 || hw->mac_type == e1000_82547_rev_2) {
5549 ret_val = 5608 ret_val =
@@ -5563,10 +5622,11 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
5563 return ret_val; 5622 return ret_val;
5564 } 5623 }
5565 5624
5566 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during 5625 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
5567 * Dx states where the power conservation is most important. During 5626 * during Dx states where the power conservation is most
5568 * driver activity we should enable SmartSpeed, so performance is 5627 * important. During driver activity we should enable
5569 * maintained. */ 5628 * SmartSpeed, so performance is maintained.
5629 */
5570 if (hw->smart_speed == e1000_smart_speed_on) { 5630 if (hw->smart_speed == e1000_smart_speed_on) {
5571 ret_val = 5631 ret_val =
5572 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 5632 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index d947e3aae1e8..8502c625dbef 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -239,7 +239,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
239 * e1000_init_module is the first routine called when the driver is 239 * e1000_init_module is the first routine called when the driver is
240 * loaded. All it does is register with the PCI subsystem. 240 * loaded. All it does is register with the PCI subsystem.
241 **/ 241 **/
242
243static int __init e1000_init_module(void) 242static int __init e1000_init_module(void)
244{ 243{
245 int ret; 244 int ret;
@@ -266,7 +265,6 @@ module_init(e1000_init_module);
266 * e1000_exit_module is called just before the driver is removed 265 * e1000_exit_module is called just before the driver is removed
267 * from memory. 266 * from memory.
268 **/ 267 **/
269
270static void __exit e1000_exit_module(void) 268static void __exit e1000_exit_module(void)
271{ 269{
272 pci_unregister_driver(&e1000_driver); 270 pci_unregister_driver(&e1000_driver);
@@ -301,7 +299,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
301 * e1000_irq_disable - Mask off interrupt generation on the NIC 299 * e1000_irq_disable - Mask off interrupt generation on the NIC
302 * @adapter: board private structure 300 * @adapter: board private structure
303 **/ 301 **/
304
305static void e1000_irq_disable(struct e1000_adapter *adapter) 302static void e1000_irq_disable(struct e1000_adapter *adapter)
306{ 303{
307 struct e1000_hw *hw = &adapter->hw; 304 struct e1000_hw *hw = &adapter->hw;
@@ -315,7 +312,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
315 * e1000_irq_enable - Enable default interrupt generation settings 312 * e1000_irq_enable - Enable default interrupt generation settings
316 * @adapter: board private structure 313 * @adapter: board private structure
317 **/ 314 **/
318
319static void e1000_irq_enable(struct e1000_adapter *adapter) 315static void e1000_irq_enable(struct e1000_adapter *adapter)
320{ 316{
321 struct e1000_hw *hw = &adapter->hw; 317 struct e1000_hw *hw = &adapter->hw;
@@ -398,11 +394,12 @@ static void e1000_configure(struct e1000_adapter *adapter)
398 e1000_configure_rx(adapter); 394 e1000_configure_rx(adapter);
399 /* call E1000_DESC_UNUSED which always leaves 395 /* call E1000_DESC_UNUSED which always leaves
400 * at least 1 descriptor unused to make sure 396 * at least 1 descriptor unused to make sure
401 * next_to_use != next_to_clean */ 397 * next_to_use != next_to_clean
398 */
402 for (i = 0; i < adapter->num_rx_queues; i++) { 399 for (i = 0; i < adapter->num_rx_queues; i++) {
403 struct e1000_rx_ring *ring = &adapter->rx_ring[i]; 400 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
404 adapter->alloc_rx_buf(adapter, ring, 401 adapter->alloc_rx_buf(adapter, ring,
405 E1000_DESC_UNUSED(ring)); 402 E1000_DESC_UNUSED(ring));
406 } 403 }
407} 404}
408 405
@@ -433,9 +430,7 @@ int e1000_up(struct e1000_adapter *adapter)
433 * The phy may be powered down to save power and turn off link when the 430 * The phy may be powered down to save power and turn off link when the
434 * driver is unloaded and wake on lan is not enabled (among others) 431 * driver is unloaded and wake on lan is not enabled (among others)
435 * *** this routine MUST be followed by a call to e1000_reset *** 432 * *** this routine MUST be followed by a call to e1000_reset ***
436 *
437 **/ 433 **/
438
439void e1000_power_up_phy(struct e1000_adapter *adapter) 434void e1000_power_up_phy(struct e1000_adapter *adapter)
440{ 435{
441 struct e1000_hw *hw = &adapter->hw; 436 struct e1000_hw *hw = &adapter->hw;
@@ -444,7 +439,8 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
444 /* Just clear the power down bit to wake the phy back up */ 439 /* Just clear the power down bit to wake the phy back up */
445 if (hw->media_type == e1000_media_type_copper) { 440 if (hw->media_type == e1000_media_type_copper) {
446 /* according to the manual, the phy will retain its 441 /* according to the manual, the phy will retain its
447 * settings across a power-down/up cycle */ 442 * settings across a power-down/up cycle
443 */
448 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); 444 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
449 mii_reg &= ~MII_CR_POWER_DOWN; 445 mii_reg &= ~MII_CR_POWER_DOWN;
450 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); 446 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
@@ -459,7 +455,8 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
459 * The PHY cannot be powered down if any of the following is true * 455 * The PHY cannot be powered down if any of the following is true *
460 * (a) WoL is enabled 456 * (a) WoL is enabled
461 * (b) AMT is active 457 * (b) AMT is active
462 * (c) SoL/IDER session is active */ 458 * (c) SoL/IDER session is active
459 */
463 if (!adapter->wol && hw->mac_type >= e1000_82540 && 460 if (!adapter->wol && hw->mac_type >= e1000_82540 &&
464 hw->media_type == e1000_media_type_copper) { 461 hw->media_type == e1000_media_type_copper) {
465 u16 mii_reg = 0; 462 u16 mii_reg = 0;
@@ -529,8 +526,7 @@ void e1000_down(struct e1000_adapter *adapter)
529 526
530 e1000_irq_disable(adapter); 527 e1000_irq_disable(adapter);
531 528
532 /* 529 /* Setting DOWN must be after irq_disable to prevent
533 * Setting DOWN must be after irq_disable to prevent
534 * a screaming interrupt. Setting DOWN also prevents 530 * a screaming interrupt. Setting DOWN also prevents
535 * tasks from rescheduling. 531 * tasks from rescheduling.
536 */ 532 */
@@ -627,14 +623,14 @@ void e1000_reset(struct e1000_adapter *adapter)
627 * rounded up to the next 1KB and expressed in KB. Likewise, 623 * rounded up to the next 1KB and expressed in KB. Likewise,
628 * the Rx FIFO should be large enough to accommodate at least 624 * the Rx FIFO should be large enough to accommodate at least
629 * one full receive packet and is similarly rounded up and 625 * one full receive packet and is similarly rounded up and
630 * expressed in KB. */ 626 * expressed in KB.
627 */
631 pba = er32(PBA); 628 pba = er32(PBA);
632 /* upper 16 bits has Tx packet buffer allocation size in KB */ 629 /* upper 16 bits has Tx packet buffer allocation size in KB */
633 tx_space = pba >> 16; 630 tx_space = pba >> 16;
634 /* lower 16 bits has Rx packet buffer allocation size in KB */ 631 /* lower 16 bits has Rx packet buffer allocation size in KB */
635 pba &= 0xffff; 632 pba &= 0xffff;
636 /* 633 /* the Tx fifo also stores 16 bytes of information about the Tx
637 * the tx fifo also stores 16 bytes of information about the tx
638 * but don't include ethernet FCS because hardware appends it 634 * but don't include ethernet FCS because hardware appends it
639 */ 635 */
640 min_tx_space = (hw->max_frame_size + 636 min_tx_space = (hw->max_frame_size +
@@ -649,7 +645,8 @@ void e1000_reset(struct e1000_adapter *adapter)
649 645
650 /* If current Tx allocation is less than the min Tx FIFO size, 646 /* If current Tx allocation is less than the min Tx FIFO size,
651 * and the min Tx FIFO size is less than the current Rx FIFO 647 * and the min Tx FIFO size is less than the current Rx FIFO
652 * allocation, take space away from current Rx allocation */ 648 * allocation, take space away from current Rx allocation
649 */
653 if (tx_space < min_tx_space && 650 if (tx_space < min_tx_space &&
654 ((min_tx_space - tx_space) < pba)) { 651 ((min_tx_space - tx_space) < pba)) {
655 pba = pba - (min_tx_space - tx_space); 652 pba = pba - (min_tx_space - tx_space);
@@ -663,8 +660,9 @@ void e1000_reset(struct e1000_adapter *adapter)
663 break; 660 break;
664 } 661 }
665 662
666 /* if short on rx space, rx wins and must trump tx 663 /* if short on Rx space, Rx wins and must trump Tx
667 * adjustment or use Early Receive if available */ 664 * adjustment or use Early Receive if available
665 */
668 if (pba < min_rx_space) 666 if (pba < min_rx_space)
669 pba = min_rx_space; 667 pba = min_rx_space;
670 } 668 }
@@ -672,8 +670,7 @@ void e1000_reset(struct e1000_adapter *adapter)
672 670
673 ew32(PBA, pba); 671 ew32(PBA, pba);
674 672
675 /* 673 /* flow control settings:
676 * flow control settings:
677 * The high water mark must be low enough to fit one full frame 674 * The high water mark must be low enough to fit one full frame
678 * (or the size used for early receive) above it in the Rx FIFO. 675 * (or the size used for early receive) above it in the Rx FIFO.
679 * Set it to the lower of: 676 * Set it to the lower of:
@@ -707,7 +704,8 @@ void e1000_reset(struct e1000_adapter *adapter)
707 u32 ctrl = er32(CTRL); 704 u32 ctrl = er32(CTRL);
708 /* clear phy power management bit if we are in gig only mode, 705 /* clear phy power management bit if we are in gig only mode,
709 * which if enabled will attempt negotiation to 100Mb, which 706 * which if enabled will attempt negotiation to 100Mb, which
710 * can cause a loss of link at power off or driver unload */ 707 * can cause a loss of link at power off or driver unload
708 */
711 ctrl &= ~E1000_CTRL_SWDPIN3; 709 ctrl &= ~E1000_CTRL_SWDPIN3;
712 ew32(CTRL, ctrl); 710 ew32(CTRL, ctrl);
713 } 711 }
@@ -808,9 +806,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
808static netdev_features_t e1000_fix_features(struct net_device *netdev, 806static netdev_features_t e1000_fix_features(struct net_device *netdev,
809 netdev_features_t features) 807 netdev_features_t features)
810{ 808{
811 /* 809 /* Since there is no support for separate Rx/Tx vlan accel
812 * Since there is no support for separate rx/tx vlan accel 810 * enable/disable make sure Tx flag is always in same state as Rx.
813 * enable/disable make sure tx flag is always in same state as rx.
814 */ 811 */
815 if (features & NETIF_F_HW_VLAN_RX) 812 if (features & NETIF_F_HW_VLAN_RX)
816 features |= NETIF_F_HW_VLAN_TX; 813 features |= NETIF_F_HW_VLAN_TX;
@@ -1012,16 +1009,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1012 if (err) 1009 if (err)
1013 goto err_sw_init; 1010 goto err_sw_init;
1014 1011
1015 /* 1012 /* there is a workaround being applied below that limits
1016 * there is a workaround being applied below that limits
1017 * 64-bit DMA addresses to 64-bit hardware. There are some 1013 * 64-bit DMA addresses to 64-bit hardware. There are some
1018 * 32-bit adapters that Tx hang when given 64-bit DMA addresses 1014 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
1019 */ 1015 */
1020 pci_using_dac = 0; 1016 pci_using_dac = 0;
1021 if ((hw->bus_type == e1000_bus_type_pcix) && 1017 if ((hw->bus_type == e1000_bus_type_pcix) &&
1022 !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 1018 !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1023 /* 1019 /* according to DMA-API-HOWTO, coherent calls will always
1024 * according to DMA-API-HOWTO, coherent calls will always
1025 * succeed if the set call did 1020 * succeed if the set call did
1026 */ 1021 */
1027 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 1022 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
@@ -1099,7 +1094,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1099 } 1094 }
1100 1095
1101 /* before reading the EEPROM, reset the controller to 1096 /* before reading the EEPROM, reset the controller to
1102 * put the device in a known good starting state */ 1097 * put the device in a known good starting state
1098 */
1103 1099
1104 e1000_reset_hw(hw); 1100 e1000_reset_hw(hw);
1105 1101
@@ -1107,8 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1107 if (e1000_validate_eeprom_checksum(hw) < 0) { 1103 if (e1000_validate_eeprom_checksum(hw) < 0) {
1108 e_err(probe, "The EEPROM Checksum Is Not Valid\n"); 1104 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1109 e1000_dump_eeprom(adapter); 1105 e1000_dump_eeprom(adapter);
1110 /* 1106 /* set MAC address to all zeroes to invalidate and temporary
1111 * set MAC address to all zeroes to invalidate and temporary
1112 * disable this device for the user. This blocks regular 1107 * disable this device for the user. This blocks regular
1113 * traffic while still permitting ethtool ioctls from reaching 1108 * traffic while still permitting ethtool ioctls from reaching
1114 * the hardware as well as allowing the user to run the 1109 * the hardware as well as allowing the user to run the
@@ -1169,7 +1164,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1169 1164
1170 /* now that we have the eeprom settings, apply the special cases 1165 /* now that we have the eeprom settings, apply the special cases
1171 * where the eeprom may be wrong or the board simply won't support 1166 * where the eeprom may be wrong or the board simply won't support
1172 * wake on lan on a particular port */ 1167 * wake on lan on a particular port
1168 */
1173 switch (pdev->device) { 1169 switch (pdev->device) {
1174 case E1000_DEV_ID_82546GB_PCIE: 1170 case E1000_DEV_ID_82546GB_PCIE:
1175 adapter->eeprom_wol = 0; 1171 adapter->eeprom_wol = 0;
@@ -1177,7 +1173,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1177 case E1000_DEV_ID_82546EB_FIBER: 1173 case E1000_DEV_ID_82546EB_FIBER:
1178 case E1000_DEV_ID_82546GB_FIBER: 1174 case E1000_DEV_ID_82546GB_FIBER:
1179 /* Wake events only supported on port A for dual fiber 1175 /* Wake events only supported on port A for dual fiber
1180 * regardless of eeprom setting */ 1176 * regardless of eeprom setting
1177 */
1181 if (er32(STATUS) & E1000_STATUS_FUNC_1) 1178 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1182 adapter->eeprom_wol = 0; 1179 adapter->eeprom_wol = 0;
1183 break; 1180 break;
@@ -1270,7 +1267,6 @@ err_pci_reg:
1270 * Hot-Plug event, or because the driver is going to be removed from 1267 * Hot-Plug event, or because the driver is going to be removed from
1271 * memory. 1268 * memory.
1272 **/ 1269 **/
1273
1274static void e1000_remove(struct pci_dev *pdev) 1270static void e1000_remove(struct pci_dev *pdev)
1275{ 1271{
1276 struct net_device *netdev = pci_get_drvdata(pdev); 1272 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -1306,7 +1302,6 @@ static void e1000_remove(struct pci_dev *pdev)
1306 * e1000_sw_init initializes the Adapter private data structure. 1302 * e1000_sw_init initializes the Adapter private data structure.
1307 * e1000_init_hw_struct MUST be called before this function 1303 * e1000_init_hw_struct MUST be called before this function
1308 **/ 1304 **/
1309
1310static int e1000_sw_init(struct e1000_adapter *adapter) 1305static int e1000_sw_init(struct e1000_adapter *adapter)
1311{ 1306{
1312 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1307 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
@@ -1337,7 +1332,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
1337 * We allocate one ring per queue at run-time since we don't know the 1332 * We allocate one ring per queue at run-time since we don't know the
1338 * number of queues at compile-time. 1333 * number of queues at compile-time.
1339 **/ 1334 **/
1340
1341static int e1000_alloc_queues(struct e1000_adapter *adapter) 1335static int e1000_alloc_queues(struct e1000_adapter *adapter)
1342{ 1336{
1343 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 1337 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
@@ -1367,7 +1361,6 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
1367 * handler is registered with the OS, the watchdog task is started, 1361 * handler is registered with the OS, the watchdog task is started,
1368 * and the stack is notified that the interface is ready. 1362 * and the stack is notified that the interface is ready.
1369 **/ 1363 **/
1370
1371static int e1000_open(struct net_device *netdev) 1364static int e1000_open(struct net_device *netdev)
1372{ 1365{
1373 struct e1000_adapter *adapter = netdev_priv(netdev); 1366 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1401,7 +1394,8 @@ static int e1000_open(struct net_device *netdev)
1401 /* before we allocate an interrupt, we must be ready to handle it. 1394 /* before we allocate an interrupt, we must be ready to handle it.
1402 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 1395 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1403 * as soon as we call pci_request_irq, so we have to setup our 1396 * as soon as we call pci_request_irq, so we have to setup our
1404 * clean_rx handler before we do so. */ 1397 * clean_rx handler before we do so.
1398 */
1405 e1000_configure(adapter); 1399 e1000_configure(adapter);
1406 1400
1407 err = e1000_request_irq(adapter); 1401 err = e1000_request_irq(adapter);
@@ -1444,7 +1438,6 @@ err_setup_tx:
1444 * needs to be disabled. A global MAC reset is issued to stop the 1438 * needs to be disabled. A global MAC reset is issued to stop the
1445 * hardware, and all transmit and receive resources are freed. 1439 * hardware, and all transmit and receive resources are freed.
1446 **/ 1440 **/
1447
1448static int e1000_close(struct net_device *netdev) 1441static int e1000_close(struct net_device *netdev)
1449{ 1442{
1450 struct e1000_adapter *adapter = netdev_priv(netdev); 1443 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1459,10 +1452,11 @@ static int e1000_close(struct net_device *netdev)
1459 e1000_free_all_rx_resources(adapter); 1452 e1000_free_all_rx_resources(adapter);
1460 1453
1461 /* kill manageability vlan ID if supported, but not if a vlan with 1454 /* kill manageability vlan ID if supported, but not if a vlan with
1462 * the same ID is registered on the host OS (let 8021q kill it) */ 1455 * the same ID is registered on the host OS (let 8021q kill it)
1456 */
1463 if ((hw->mng_cookie.status & 1457 if ((hw->mng_cookie.status &
1464 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 1458 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1465 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { 1459 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1466 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 1460 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1467 } 1461 }
1468 1462
@@ -1483,7 +1477,8 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1483 unsigned long end = begin + len; 1477 unsigned long end = begin + len;
1484 1478
1485 /* First rev 82545 and 82546 need to not allow any memory 1479 /* First rev 82545 and 82546 need to not allow any memory
1486 * write location to cross 64k boundary due to errata 23 */ 1480 * write location to cross 64k boundary due to errata 23
1481 */
1487 if (hw->mac_type == e1000_82545 || 1482 if (hw->mac_type == e1000_82545 ||
1488 hw->mac_type == e1000_ce4100 || 1483 hw->mac_type == e1000_ce4100 ||
1489 hw->mac_type == e1000_82546) { 1484 hw->mac_type == e1000_82546) {
@@ -1500,7 +1495,6 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1500 * 1495 *
1501 * Return 0 on success, negative on failure 1496 * Return 0 on success, negative on failure
1502 **/ 1497 **/
1503
1504static int e1000_setup_tx_resources(struct e1000_adapter *adapter, 1498static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1505 struct e1000_tx_ring *txdr) 1499 struct e1000_tx_ring *txdr)
1506{ 1500{
@@ -1574,7 +1568,6 @@ setup_tx_desc_die:
1574 * 1568 *
1575 * Return 0 on success, negative on failure 1569 * Return 0 on success, negative on failure
1576 **/ 1570 **/
1577
1578int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) 1571int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1579{ 1572{
1580 int i, err = 0; 1573 int i, err = 0;
@@ -1599,7 +1592,6 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1599 * 1592 *
1600 * Configure the Tx unit of the MAC after a reset. 1593 * Configure the Tx unit of the MAC after a reset.
1601 **/ 1594 **/
1602
1603static void e1000_configure_tx(struct e1000_adapter *adapter) 1595static void e1000_configure_tx(struct e1000_adapter *adapter)
1604{ 1596{
1605 u64 tdba; 1597 u64 tdba;
@@ -1620,8 +1612,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1620 ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); 1612 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1621 ew32(TDT, 0); 1613 ew32(TDT, 0);
1622 ew32(TDH, 0); 1614 ew32(TDH, 0);
1623 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH); 1615 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1624 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT); 1616 E1000_TDH : E1000_82542_TDH);
1617 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1618 E1000_TDT : E1000_82542_TDT);
1625 break; 1619 break;
1626 } 1620 }
1627 1621
@@ -1676,7 +1670,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1676 adapter->txd_cmd |= E1000_TXD_CMD_RS; 1670 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1677 1671
1678 /* Cache if we're 82544 running in PCI-X because we'll 1672 /* Cache if we're 82544 running in PCI-X because we'll
1679 * need this to apply a workaround later in the send path. */ 1673 * need this to apply a workaround later in the send path.
1674 */
1680 if (hw->mac_type == e1000_82544 && 1675 if (hw->mac_type == e1000_82544 &&
1681 hw->bus_type == e1000_bus_type_pcix) 1676 hw->bus_type == e1000_bus_type_pcix)
1682 adapter->pcix_82544 = true; 1677 adapter->pcix_82544 = true;
@@ -1692,7 +1687,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1692 * 1687 *
1693 * Returns 0 on success, negative on failure 1688 * Returns 0 on success, negative on failure
1694 **/ 1689 **/
1695
1696static int e1000_setup_rx_resources(struct e1000_adapter *adapter, 1690static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1697 struct e1000_rx_ring *rxdr) 1691 struct e1000_rx_ring *rxdr)
1698{ 1692{
@@ -1771,7 +1765,6 @@ setup_rx_desc_die:
1771 * 1765 *
1772 * Return 0 on success, negative on failure 1766 * Return 0 on success, negative on failure
1773 **/ 1767 **/
1774
1775int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) 1768int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1776{ 1769{
1777 int i, err = 0; 1770 int i, err = 0;
@@ -1840,7 +1833,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1840 /* This is useful for sniffing bad packets. */ 1833 /* This is useful for sniffing bad packets. */
1841 if (adapter->netdev->features & NETIF_F_RXALL) { 1834 if (adapter->netdev->features & NETIF_F_RXALL) {
1842 /* UPE and MPE will be handled by normal PROMISC logic 1835 /* UPE and MPE will be handled by normal PROMISC logic
1843 * in e1000e_set_rx_mode */ 1836 * in e1000e_set_rx_mode
1837 */
1844 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 1838 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1845 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 1839 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1846 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 1840 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -1862,7 +1856,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1862 * 1856 *
1863 * Configure the Rx unit of the MAC after a reset. 1857 * Configure the Rx unit of the MAC after a reset.
1864 **/ 1858 **/
1865
1866static void e1000_configure_rx(struct e1000_adapter *adapter) 1859static void e1000_configure_rx(struct e1000_adapter *adapter)
1867{ 1860{
1868 u64 rdba; 1861 u64 rdba;
@@ -1895,7 +1888,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1895 } 1888 }
1896 1889
1897 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1890 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1898 * the Base and Length of the Rx Descriptor Ring */ 1891 * the Base and Length of the Rx Descriptor Ring
1892 */
1899 switch (adapter->num_rx_queues) { 1893 switch (adapter->num_rx_queues) {
1900 case 1: 1894 case 1:
1901 default: 1895 default:
@@ -1905,8 +1899,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1905 ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); 1899 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1906 ew32(RDT, 0); 1900 ew32(RDT, 0);
1907 ew32(RDH, 0); 1901 ew32(RDH, 0);
1908 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); 1902 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1909 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT); 1903 E1000_RDH : E1000_82542_RDH);
1904 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1905 E1000_RDT : E1000_82542_RDT);
1910 break; 1906 break;
1911 } 1907 }
1912 1908
@@ -1932,7 +1928,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1932 * 1928 *
1933 * Free all transmit software resources 1929 * Free all transmit software resources
1934 **/ 1930 **/
1935
1936static void e1000_free_tx_resources(struct e1000_adapter *adapter, 1931static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1937 struct e1000_tx_ring *tx_ring) 1932 struct e1000_tx_ring *tx_ring)
1938{ 1933{
@@ -1955,7 +1950,6 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1955 * 1950 *
1956 * Free all transmit software resources 1951 * Free all transmit software resources
1957 **/ 1952 **/
1958
1959void e1000_free_all_tx_resources(struct e1000_adapter *adapter) 1953void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1960{ 1954{
1961 int i; 1955 int i;
@@ -1990,7 +1984,6 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1990 * @adapter: board private structure 1984 * @adapter: board private structure
1991 * @tx_ring: ring to be cleaned 1985 * @tx_ring: ring to be cleaned
1992 **/ 1986 **/
1993
1994static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 1987static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1995 struct e1000_tx_ring *tx_ring) 1988 struct e1000_tx_ring *tx_ring)
1996{ 1989{
@@ -2026,7 +2019,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2026 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues 2019 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2027 * @adapter: board private structure 2020 * @adapter: board private structure
2028 **/ 2021 **/
2029
2030static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) 2022static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2031{ 2023{
2032 int i; 2024 int i;
@@ -2042,7 +2034,6 @@ static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2042 * 2034 *
2043 * Free all receive software resources 2035 * Free all receive software resources
2044 **/ 2036 **/
2045
2046static void e1000_free_rx_resources(struct e1000_adapter *adapter, 2037static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2047 struct e1000_rx_ring *rx_ring) 2038 struct e1000_rx_ring *rx_ring)
2048{ 2039{
@@ -2065,7 +2056,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2065 * 2056 *
2066 * Free all receive software resources 2057 * Free all receive software resources
2067 **/ 2058 **/
2068
2069void e1000_free_all_rx_resources(struct e1000_adapter *adapter) 2059void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2070{ 2060{
2071 int i; 2061 int i;
@@ -2079,7 +2069,6 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2079 * @adapter: board private structure 2069 * @adapter: board private structure
2080 * @rx_ring: ring to free buffers from 2070 * @rx_ring: ring to free buffers from
2081 **/ 2071 **/
2082
2083static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 2072static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2084 struct e1000_rx_ring *rx_ring) 2073 struct e1000_rx_ring *rx_ring)
2085{ 2074{
@@ -2138,7 +2127,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2138 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues 2127 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2139 * @adapter: board private structure 2128 * @adapter: board private structure
2140 **/ 2129 **/
2141
2142static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) 2130static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2143{ 2131{
2144 int i; 2132 int i;
@@ -2198,7 +2186,6 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2198 * 2186 *
2199 * Returns 0 on success, negative on failure 2187 * Returns 0 on success, negative on failure
2200 **/ 2188 **/
2201
2202static int e1000_set_mac(struct net_device *netdev, void *p) 2189static int e1000_set_mac(struct net_device *netdev, void *p)
2203{ 2190{
2204 struct e1000_adapter *adapter = netdev_priv(netdev); 2191 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2233,7 +2220,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
2233 * responsible for configuring the hardware for proper unicast, multicast, 2220 * responsible for configuring the hardware for proper unicast, multicast,
2234 * promiscuous mode, and all-multi behavior. 2221 * promiscuous mode, and all-multi behavior.
2235 **/ 2222 **/
2236
2237static void e1000_set_rx_mode(struct net_device *netdev) 2223static void e1000_set_rx_mode(struct net_device *netdev)
2238{ 2224{
2239 struct e1000_adapter *adapter = netdev_priv(netdev); 2225 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2317,10 +2303,10 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2317 } 2303 }
2318 2304
2319 /* write the hash table completely, write from bottom to avoid 2305 /* write the hash table completely, write from bottom to avoid
2320 * both stupid write combining chipsets, and flushing each write */ 2306 * both stupid write combining chipsets, and flushing each write
2307 */
2321 for (i = mta_reg_count - 1; i >= 0 ; i--) { 2308 for (i = mta_reg_count - 1; i >= 0 ; i--) {
2322 /* 2309 /* If we are on an 82544 has an errata where writing odd
2323 * If we are on an 82544 has an errata where writing odd
2324 * offsets overwrites the previous even offset, but writing 2310 * offsets overwrites the previous even offset, but writing
2325 * backwards over the range solves the issue by always 2311 * backwards over the range solves the issue by always
2326 * writing the odd offset first 2312 * writing the odd offset first
@@ -2458,8 +2444,8 @@ static void e1000_watchdog(struct work_struct *work)
2458 bool txb2b = true; 2444 bool txb2b = true;
2459 /* update snapshot of PHY registers on LSC */ 2445 /* update snapshot of PHY registers on LSC */
2460 e1000_get_speed_and_duplex(hw, 2446 e1000_get_speed_and_duplex(hw,
2461 &adapter->link_speed, 2447 &adapter->link_speed,
2462 &adapter->link_duplex); 2448 &adapter->link_duplex);
2463 2449
2464 ctrl = er32(CTRL); 2450 ctrl = er32(CTRL);
2465 pr_info("%s NIC Link is Up %d Mbps %s, " 2451 pr_info("%s NIC Link is Up %d Mbps %s, "
@@ -2533,7 +2519,8 @@ link_up:
2533 /* We've lost link, so the controller stops DMA, 2519 /* We've lost link, so the controller stops DMA,
2534 * but we've got queued Tx work that's never going 2520 * but we've got queued Tx work that's never going
2535 * to get done, so reset controller to flush Tx. 2521 * to get done, so reset controller to flush Tx.
2536 * (Do the reset outside of interrupt context). */ 2522 * (Do the reset outside of interrupt context).
2523 */
2537 adapter->tx_timeout_count++; 2524 adapter->tx_timeout_count++;
2538 schedule_work(&adapter->reset_task); 2525 schedule_work(&adapter->reset_task);
2539 /* exit immediately since reset is imminent */ 2526 /* exit immediately since reset is imminent */
@@ -2543,8 +2530,7 @@ link_up:
2543 2530
2544 /* Simple mode for Interrupt Throttle Rate (ITR) */ 2531 /* Simple mode for Interrupt Throttle Rate (ITR) */
2545 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { 2532 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2546 /* 2533 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2547 * Symmetric Tx/Rx gets a reduced ITR=2000;
2548 * Total asymmetrical Tx or Rx gets ITR=8000; 2534 * Total asymmetrical Tx or Rx gets ITR=8000;
2549 * everyone else is between 2000-8000. 2535 * everyone else is between 2000-8000.
2550 */ 2536 */
@@ -2659,18 +2645,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
2659 goto set_itr_now; 2645 goto set_itr_now;
2660 } 2646 }
2661 2647
2662 adapter->tx_itr = e1000_update_itr(adapter, 2648 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2663 adapter->tx_itr, 2649 adapter->total_tx_packets,
2664 adapter->total_tx_packets, 2650 adapter->total_tx_bytes);
2665 adapter->total_tx_bytes);
2666 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2651 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2667 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2652 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2668 adapter->tx_itr = low_latency; 2653 adapter->tx_itr = low_latency;
2669 2654
2670 adapter->rx_itr = e1000_update_itr(adapter, 2655 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2671 adapter->rx_itr, 2656 adapter->total_rx_packets,
2672 adapter->total_rx_packets, 2657 adapter->total_rx_bytes);
2673 adapter->total_rx_bytes);
2674 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2658 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2675 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2659 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2676 adapter->rx_itr = low_latency; 2660 adapter->rx_itr = low_latency;
@@ -2696,10 +2680,11 @@ set_itr_now:
2696 if (new_itr != adapter->itr) { 2680 if (new_itr != adapter->itr) {
2697 /* this attempts to bias the interrupt rate towards Bulk 2681 /* this attempts to bias the interrupt rate towards Bulk
2698 * by adding intermediate steps when interrupt rate is 2682 * by adding intermediate steps when interrupt rate is
2699 * increasing */ 2683 * increasing
2684 */
2700 new_itr = new_itr > adapter->itr ? 2685 new_itr = new_itr > adapter->itr ?
2701 min(adapter->itr + (new_itr >> 2), new_itr) : 2686 min(adapter->itr + (new_itr >> 2), new_itr) :
2702 new_itr; 2687 new_itr;
2703 adapter->itr = new_itr; 2688 adapter->itr = new_itr;
2704 ew32(ITR, 1000000000 / (new_itr * 256)); 2689 ew32(ITR, 1000000000 / (new_itr * 256));
2705 } 2690 }
@@ -2861,7 +2846,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2861 /* Workaround for Controller erratum -- 2846 /* Workaround for Controller erratum --
2862 * descriptor for non-tso packet in a linear SKB that follows a 2847 * descriptor for non-tso packet in a linear SKB that follows a
2863 * tso gets written back prematurely before the data is fully 2848 * tso gets written back prematurely before the data is fully
2864 * DMA'd to the controller */ 2849 * DMA'd to the controller
2850 */
2865 if (!skb->data_len && tx_ring->last_tx_tso && 2851 if (!skb->data_len && tx_ring->last_tx_tso &&
2866 !skb_is_gso(skb)) { 2852 !skb_is_gso(skb)) {
2867 tx_ring->last_tx_tso = false; 2853 tx_ring->last_tx_tso = false;
@@ -2869,7 +2855,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2869 } 2855 }
2870 2856
2871 /* Workaround for premature desc write-backs 2857 /* Workaround for premature desc write-backs
2872 * in TSO mode. Append 4-byte sentinel desc */ 2858 * in TSO mode. Append 4-byte sentinel desc
2859 */
2873 if (unlikely(mss && !nr_frags && size == len && size > 8)) 2860 if (unlikely(mss && !nr_frags && size == len && size > 8))
2874 size -= 4; 2861 size -= 4;
2875 /* work-around for errata 10 and it applies 2862 /* work-around for errata 10 and it applies
@@ -2882,7 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2882 size = 2015; 2869 size = 2015;
2883 2870
2884 /* Workaround for potential 82544 hang in PCI-X. Avoid 2871 /* Workaround for potential 82544 hang in PCI-X. Avoid
2885 * terminating buffers within evenly-aligned dwords. */ 2872 * terminating buffers within evenly-aligned dwords.
2873 */
2886 if (unlikely(adapter->pcix_82544 && 2874 if (unlikely(adapter->pcix_82544 &&
2887 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2875 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2888 size > 4)) 2876 size > 4))
@@ -2894,7 +2882,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2894 buffer_info->mapped_as_page = false; 2882 buffer_info->mapped_as_page = false;
2895 buffer_info->dma = dma_map_single(&pdev->dev, 2883 buffer_info->dma = dma_map_single(&pdev->dev,
2896 skb->data + offset, 2884 skb->data + offset,
2897 size, DMA_TO_DEVICE); 2885 size, DMA_TO_DEVICE);
2898 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2886 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2899 goto dma_error; 2887 goto dma_error;
2900 buffer_info->next_to_watch = i; 2888 buffer_info->next_to_watch = i;
@@ -2925,12 +2913,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2925 buffer_info = &tx_ring->buffer_info[i]; 2913 buffer_info = &tx_ring->buffer_info[i];
2926 size = min(len, max_per_txd); 2914 size = min(len, max_per_txd);
2927 /* Workaround for premature desc write-backs 2915 /* Workaround for premature desc write-backs
2928 * in TSO mode. Append 4-byte sentinel desc */ 2916 * in TSO mode. Append 4-byte sentinel desc
2929 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 2917 */
2918 if (unlikely(mss && f == (nr_frags-1) &&
2919 size == len && size > 8))
2930 size -= 4; 2920 size -= 4;
2931 /* Workaround for potential 82544 hang in PCI-X. 2921 /* Workaround for potential 82544 hang in PCI-X.
2932 * Avoid terminating buffers within evenly-aligned 2922 * Avoid terminating buffers within evenly-aligned
2933 * dwords. */ 2923 * dwords.
2924 */
2934 bufend = (unsigned long) 2925 bufend = (unsigned long)
2935 page_to_phys(skb_frag_page(frag)); 2926 page_to_phys(skb_frag_page(frag));
2936 bufend += offset + size - 1; 2927 bufend += offset + size - 1;
@@ -2994,7 +2985,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
2994 2985
2995 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2986 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2996 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2987 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2997 E1000_TXD_CMD_TSE; 2988 E1000_TXD_CMD_TSE;
2998 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2989 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2999 2990
3000 if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2991 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
@@ -3035,13 +3026,15 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
3035 /* Force memory writes to complete before letting h/w 3026 /* Force memory writes to complete before letting h/w
3036 * know there are new descriptors to fetch. (Only 3027 * know there are new descriptors to fetch. (Only
3037 * applicable for weak-ordered memory model archs, 3028 * applicable for weak-ordered memory model archs,
3038 * such as IA-64). */ 3029 * such as IA-64).
3030 */
3039 wmb(); 3031 wmb();
3040 3032
3041 tx_ring->next_to_use = i; 3033 tx_ring->next_to_use = i;
3042 writel(i, hw->hw_addr + tx_ring->tdt); 3034 writel(i, hw->hw_addr + tx_ring->tdt);
3043 /* we need this if more than one processor can write to our tail 3035 /* we need this if more than one processor can write to our tail
3044 * at a time, it syncronizes IO on IA64/Altix systems */ 3036 * at a time, it synchronizes IO on IA64/Altix systems
3037 */
3045 mmiowb(); 3038 mmiowb();
3046} 3039}
3047 3040
@@ -3090,11 +3083,13 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3090 netif_stop_queue(netdev); 3083 netif_stop_queue(netdev);
3091 /* Herbert's original patch had: 3084 /* Herbert's original patch had:
3092 * smp_mb__after_netif_stop_queue(); 3085 * smp_mb__after_netif_stop_queue();
3093 * but since that doesn't exist yet, just open code it. */ 3086 * but since that doesn't exist yet, just open code it.
3087 */
3094 smp_mb(); 3088 smp_mb();
3095 3089
3096 /* We need to check again in a case another CPU has just 3090 /* We need to check again in a case another CPU has just
3097 * made room available. */ 3091 * made room available.
3092 */
3098 if (likely(E1000_DESC_UNUSED(tx_ring) < size)) 3093 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3099 return -EBUSY; 3094 return -EBUSY;
3100 3095
@@ -3105,7 +3100,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3105} 3100}
3106 3101
3107static int e1000_maybe_stop_tx(struct net_device *netdev, 3102static int e1000_maybe_stop_tx(struct net_device *netdev,
3108 struct e1000_tx_ring *tx_ring, int size) 3103 struct e1000_tx_ring *tx_ring, int size)
3109{ 3104{
3110 if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) 3105 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3111 return 0; 3106 return 0;
@@ -3129,10 +3124,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3129 int tso; 3124 int tso;
3130 unsigned int f; 3125 unsigned int f;
3131 3126
3132 /* This goes back to the question of how to logically map a tx queue 3127 /* This goes back to the question of how to logically map a Tx queue
3133 * to a flow. Right now, performance is impacted slightly negatively 3128 * to a flow. Right now, performance is impacted slightly negatively
3134 * if using multiple tx queues. If the stack breaks away from a 3129 * if using multiple Tx queues. If the stack breaks away from a
3135 * single qdisc implementation, we can look at this again. */ 3130 * single qdisc implementation, we can look at this again.
3131 */
3136 tx_ring = adapter->tx_ring; 3132 tx_ring = adapter->tx_ring;
3137 3133
3138 if (unlikely(skb->len <= 0)) { 3134 if (unlikely(skb->len <= 0)) {
@@ -3157,7 +3153,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3157 * initiating the DMA for each buffer. The calc is: 3153 * initiating the DMA for each buffer. The calc is:
3158 * 4 = ceil(buffer len/mss). To make sure we don't 3154 * 4 = ceil(buffer len/mss). To make sure we don't
3159 * overrun the FIFO, adjust the max buffer len if mss 3155 * overrun the FIFO, adjust the max buffer len if mss
3160 * drops. */ 3156 * drops.
3157 */
3161 if (mss) { 3158 if (mss) {
3162 u8 hdr_len; 3159 u8 hdr_len;
3163 max_per_txd = min(mss << 2, max_per_txd); 3160 max_per_txd = min(mss << 2, max_per_txd);
@@ -3173,8 +3170,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3173 * this hardware's requirements 3170 * this hardware's requirements
3174 * NOTE: this is a TSO only workaround 3171 * NOTE: this is a TSO only workaround
3175 * if end byte alignment not correct move us 3172 * if end byte alignment not correct move us
3176 * into the next dword */ 3173 * into the next dword
3177 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) 3174 */
3175 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3176 & 4)
3178 break; 3177 break;
3179 /* fall through */ 3178 /* fall through */
3180 pull_size = min((unsigned int)4, skb->data_len); 3179 pull_size = min((unsigned int)4, skb->data_len);
@@ -3222,7 +3221,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3222 count += nr_frags; 3221 count += nr_frags;
3223 3222
3224 /* need: count + 2 desc gap to keep tail from touching 3223 /* need: count + 2 desc gap to keep tail from touching
3225 * head, otherwise try next time */ 3224 * head, otherwise try next time
3225 */
3226 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) 3226 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3227 return NETDEV_TX_BUSY; 3227 return NETDEV_TX_BUSY;
3228 3228
@@ -3261,7 +3261,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3261 tx_flags |= E1000_TX_FLAGS_NO_FCS; 3261 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3262 3262
3263 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, 3263 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3264 nr_frags, mss); 3264 nr_frags, mss);
3265 3265
3266 if (count) { 3266 if (count) {
3267 netdev_sent_queue(netdev, skb->len); 3267 netdev_sent_queue(netdev, skb->len);
@@ -3363,9 +3363,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
3363 /* Print Registers */ 3363 /* Print Registers */
3364 e1000_regdump(adapter); 3364 e1000_regdump(adapter);
3365 3365
3366 /* 3366 /* transmit dump */
3367 * transmit dump
3368 */
3369 pr_info("TX Desc ring0 dump\n"); 3367 pr_info("TX Desc ring0 dump\n");
3370 3368
3371 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) 3369 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
@@ -3426,9 +3424,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
3426 } 3424 }
3427 3425
3428rx_ring_summary: 3426rx_ring_summary:
3429 /* 3427 /* receive dump */
3430 * receive dump
3431 */
3432 pr_info("\nRX Desc ring dump\n"); 3428 pr_info("\nRX Desc ring dump\n");
3433 3429
3434 /* Legacy Receive Descriptor Format 3430 /* Legacy Receive Descriptor Format
@@ -3493,7 +3489,6 @@ exit:
3493 * e1000_tx_timeout - Respond to a Tx Hang 3489 * e1000_tx_timeout - Respond to a Tx Hang
3494 * @netdev: network interface device structure 3490 * @netdev: network interface device structure
3495 **/ 3491 **/
3496
3497static void e1000_tx_timeout(struct net_device *netdev) 3492static void e1000_tx_timeout(struct net_device *netdev)
3498{ 3493{
3499 struct e1000_adapter *adapter = netdev_priv(netdev); 3494 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3521,7 +3516,6 @@ static void e1000_reset_task(struct work_struct *work)
3521 * Returns the address of the device statistics structure. 3516 * Returns the address of the device statistics structure.
3522 * The statistics are actually updated from the watchdog. 3517 * The statistics are actually updated from the watchdog.
3523 **/ 3518 **/
3524
3525static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 3519static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3526{ 3520{
3527 /* only return the current stats */ 3521 /* only return the current stats */
@@ -3535,7 +3529,6 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3535 * 3529 *
3536 * Returns 0 on success, negative on failure 3530 * Returns 0 on success, negative on failure
3537 **/ 3531 **/
3538
3539static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 3532static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3540{ 3533{
3541 struct e1000_adapter *adapter = netdev_priv(netdev); 3534 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3572,8 +3565,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3572 * means we reserve 2 more, this pushes us to allocate from the next 3565 * means we reserve 2 more, this pushes us to allocate from the next
3573 * larger slab size. 3566 * larger slab size.
3574 * i.e. RXBUFFER_2048 --> size-4096 slab 3567 * i.e. RXBUFFER_2048 --> size-4096 slab
3575 * however with the new *_jumbo_rx* routines, jumbo receives will use 3568 * however with the new *_jumbo_rx* routines, jumbo receives will use
3576 * fragmented skbs */ 3569 * fragmented skbs
3570 */
3577 3571
3578 if (max_frame <= E1000_RXBUFFER_2048) 3572 if (max_frame <= E1000_RXBUFFER_2048)
3579 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3573 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
@@ -3608,7 +3602,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3608 * e1000_update_stats - Update the board statistics counters 3602 * e1000_update_stats - Update the board statistics counters
3609 * @adapter: board private structure 3603 * @adapter: board private structure
3610 **/ 3604 **/
3611
3612void e1000_update_stats(struct e1000_adapter *adapter) 3605void e1000_update_stats(struct e1000_adapter *adapter)
3613{ 3606{
3614 struct net_device *netdev = adapter->netdev; 3607 struct net_device *netdev = adapter->netdev;
@@ -3619,8 +3612,7 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3619 3612
3620#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3613#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3621 3614
3622 /* 3615 /* Prevent stats update while adapter is being reset, or if the pci
3623 * Prevent stats update while adapter is being reset, or if the pci
3624 * connection is down. 3616 * connection is down.
3625 */ 3617 */
3626 if (adapter->link_speed == 0) 3618 if (adapter->link_speed == 0)
@@ -3710,7 +3702,8 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3710 /* Rx Errors */ 3702 /* Rx Errors */
3711 3703
3712 /* RLEC on some newer hardware can be incorrect so build 3704 /* RLEC on some newer hardware can be incorrect so build
3713 * our own version based on RUC and ROC */ 3705 * our own version based on RUC and ROC
3706 */
3714 netdev->stats.rx_errors = adapter->stats.rxerrc + 3707 netdev->stats.rx_errors = adapter->stats.rxerrc +
3715 adapter->stats.crcerrs + adapter->stats.algnerrc + 3708 adapter->stats.crcerrs + adapter->stats.algnerrc +
3716 adapter->stats.ruc + adapter->stats.roc + 3709 adapter->stats.ruc + adapter->stats.roc +
@@ -3764,7 +3757,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3764 * @irq: interrupt number 3757 * @irq: interrupt number
3765 * @data: pointer to a network interface device structure 3758 * @data: pointer to a network interface device structure
3766 **/ 3759 **/
3767
3768static irqreturn_t e1000_intr(int irq, void *data) 3760static irqreturn_t e1000_intr(int irq, void *data)
3769{ 3761{
3770 struct net_device *netdev = data; 3762 struct net_device *netdev = data;
@@ -3775,8 +3767,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
3775 if (unlikely((!icr))) 3767 if (unlikely((!icr)))
3776 return IRQ_NONE; /* Not our interrupt */ 3768 return IRQ_NONE; /* Not our interrupt */
3777 3769
3778 /* 3770 /* we might have caused the interrupt, but the above
3779 * we might have caused the interrupt, but the above
3780 * read cleared it, and just in case the driver is 3771 * read cleared it, and just in case the driver is
3781 * down there is nothing to do so return handled 3772 * down there is nothing to do so return handled
3782 */ 3773 */
@@ -3802,7 +3793,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
3802 __napi_schedule(&adapter->napi); 3793 __napi_schedule(&adapter->napi);
3803 } else { 3794 } else {
3804 /* this really should not happen! if it does it is basically a 3795 /* this really should not happen! if it does it is basically a
3805 * bug, but not a hard error, so enable ints and continue */ 3796 * bug, but not a hard error, so enable ints and continue
3797 */
3806 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3798 if (!test_bit(__E1000_DOWN, &adapter->flags))
3807 e1000_irq_enable(adapter); 3799 e1000_irq_enable(adapter);
3808 } 3800 }
@@ -3816,7 +3808,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
3816 **/ 3808 **/
3817static int e1000_clean(struct napi_struct *napi, int budget) 3809static int e1000_clean(struct napi_struct *napi, int budget)
3818{ 3810{
3819 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 3811 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3812 napi);
3820 int tx_clean_complete = 0, work_done = 0; 3813 int tx_clean_complete = 0, work_done = 0;
3821 3814
3822 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); 3815 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
@@ -3907,11 +3900,12 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3907 3900
3908 if (adapter->detect_tx_hung) { 3901 if (adapter->detect_tx_hung) {
3909 /* Detect a transmit hang in hardware, this serializes the 3902 /* Detect a transmit hang in hardware, this serializes the
3910 * check with the clearing of time_stamp and movement of i */ 3903 * check with the clearing of time_stamp and movement of i
3904 */
3911 adapter->detect_tx_hung = false; 3905 adapter->detect_tx_hung = false;
3912 if (tx_ring->buffer_info[eop].time_stamp && 3906 if (tx_ring->buffer_info[eop].time_stamp &&
3913 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 3907 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3914 (adapter->tx_timeout_factor * HZ)) && 3908 (adapter->tx_timeout_factor * HZ)) &&
3915 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3909 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3916 3910
3917 /* detected Tx unit hang */ 3911 /* detected Tx unit hang */
@@ -3954,7 +3948,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3954 * @csum: receive descriptor csum field 3948 * @csum: receive descriptor csum field
3955 * @sk_buff: socket buffer with received data 3949 * @sk_buff: socket buffer with received data
3956 **/ 3950 **/
3957
3958static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, 3951static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3959 u32 csum, struct sk_buff *skb) 3952 u32 csum, struct sk_buff *skb)
3960{ 3953{
@@ -3990,7 +3983,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3990 * e1000_consume_page - helper function 3983 * e1000_consume_page - helper function
3991 **/ 3984 **/
3992static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, 3985static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3993 u16 length) 3986 u16 length)
3994{ 3987{
3995 bi->page = NULL; 3988 bi->page = NULL;
3996 skb->len += length; 3989 skb->len += length;
@@ -4086,11 +4079,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4086 if (TBI_ACCEPT(hw, status, rx_desc->errors, length, 4079 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4087 last_byte)) { 4080 last_byte)) {
4088 spin_lock_irqsave(&adapter->stats_lock, 4081 spin_lock_irqsave(&adapter->stats_lock,
4089 irq_flags); 4082 irq_flags);
4090 e1000_tbi_adjust_stats(hw, &adapter->stats, 4083 e1000_tbi_adjust_stats(hw, &adapter->stats,
4091 length, mapped); 4084 length, mapped);
4092 spin_unlock_irqrestore(&adapter->stats_lock, 4085 spin_unlock_irqrestore(&adapter->stats_lock,
4093 irq_flags); 4086 irq_flags);
4094 length--; 4087 length--;
4095 } else { 4088 } else {
4096 if (netdev->features & NETIF_F_RXALL) 4089 if (netdev->features & NETIF_F_RXALL)
@@ -4098,7 +4091,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4098 /* recycle both page and skb */ 4091 /* recycle both page and skb */
4099 buffer_info->skb = skb; 4092 buffer_info->skb = skb;
4100 /* an error means any chain goes out the window 4093 /* an error means any chain goes out the window
4101 * too */ 4094 * too
4095 */
4102 if (rx_ring->rx_skb_top) 4096 if (rx_ring->rx_skb_top)
4103 dev_kfree_skb(rx_ring->rx_skb_top); 4097 dev_kfree_skb(rx_ring->rx_skb_top);
4104 rx_ring->rx_skb_top = NULL; 4098 rx_ring->rx_skb_top = NULL;
@@ -4114,7 +4108,7 @@ process_skb:
4114 /* this is the beginning of a chain */ 4108 /* this is the beginning of a chain */
4115 rxtop = skb; 4109 rxtop = skb;
4116 skb_fill_page_desc(rxtop, 0, buffer_info->page, 4110 skb_fill_page_desc(rxtop, 0, buffer_info->page,
4117 0, length); 4111 0, length);
4118 } else { 4112 } else {
4119 /* this is the middle of a chain */ 4113 /* this is the middle of a chain */
4120 skb_fill_page_desc(rxtop, 4114 skb_fill_page_desc(rxtop,
@@ -4132,38 +4126,42 @@ process_skb:
4132 skb_shinfo(rxtop)->nr_frags, 4126 skb_shinfo(rxtop)->nr_frags,
4133 buffer_info->page, 0, length); 4127 buffer_info->page, 0, length);
4134 /* re-use the current skb, we only consumed the 4128 /* re-use the current skb, we only consumed the
4135 * page */ 4129 * page
4130 */
4136 buffer_info->skb = skb; 4131 buffer_info->skb = skb;
4137 skb = rxtop; 4132 skb = rxtop;
4138 rxtop = NULL; 4133 rxtop = NULL;
4139 e1000_consume_page(buffer_info, skb, length); 4134 e1000_consume_page(buffer_info, skb, length);
4140 } else { 4135 } else {
4141 /* no chain, got EOP, this buf is the packet 4136 /* no chain, got EOP, this buf is the packet
4142 * copybreak to save the put_page/alloc_page */ 4137 * copybreak to save the put_page/alloc_page
4138 */
4143 if (length <= copybreak && 4139 if (length <= copybreak &&
4144 skb_tailroom(skb) >= length) { 4140 skb_tailroom(skb) >= length) {
4145 u8 *vaddr; 4141 u8 *vaddr;
4146 vaddr = kmap_atomic(buffer_info->page); 4142 vaddr = kmap_atomic(buffer_info->page);
4147 memcpy(skb_tail_pointer(skb), vaddr, length); 4143 memcpy(skb_tail_pointer(skb), vaddr,
4144 length);
4148 kunmap_atomic(vaddr); 4145 kunmap_atomic(vaddr);
4149 /* re-use the page, so don't erase 4146 /* re-use the page, so don't erase
4150 * buffer_info->page */ 4147 * buffer_info->page
4148 */
4151 skb_put(skb, length); 4149 skb_put(skb, length);
4152 } else { 4150 } else {
4153 skb_fill_page_desc(skb, 0, 4151 skb_fill_page_desc(skb, 0,
4154 buffer_info->page, 0, 4152 buffer_info->page, 0,
4155 length); 4153 length);
4156 e1000_consume_page(buffer_info, skb, 4154 e1000_consume_page(buffer_info, skb,
4157 length); 4155 length);
4158 } 4156 }
4159 } 4157 }
4160 } 4158 }
4161 4159
4162 /* Receive Checksum Offload XXX recompute due to CRC strip? */ 4160 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4163 e1000_rx_checksum(adapter, 4161 e1000_rx_checksum(adapter,
4164 (u32)(status) | 4162 (u32)(status) |
4165 ((u32)(rx_desc->errors) << 24), 4163 ((u32)(rx_desc->errors) << 24),
4166 le16_to_cpu(rx_desc->csum), skb); 4164 le16_to_cpu(rx_desc->csum), skb);
4167 4165
4168 total_rx_bytes += (skb->len - 4); /* don't count FCS */ 4166 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4169 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4167 if (likely(!(netdev->features & NETIF_F_RXFCS)))
@@ -4205,8 +4203,7 @@ next_desc:
4205 return cleaned; 4203 return cleaned;
4206} 4204}
4207 4205
4208/* 4206/* this should improve performance for small packets with large amounts
4209 * this should improve performance for small packets with large amounts
4210 * of reassembly being done in the stack 4207 * of reassembly being done in the stack
4211 */ 4208 */
4212static void e1000_check_copybreak(struct net_device *netdev, 4209static void e1000_check_copybreak(struct net_device *netdev,
@@ -4310,9 +4307,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4310 last_byte)) { 4307 last_byte)) {
4311 spin_lock_irqsave(&adapter->stats_lock, flags); 4308 spin_lock_irqsave(&adapter->stats_lock, flags);
4312 e1000_tbi_adjust_stats(hw, &adapter->stats, 4309 e1000_tbi_adjust_stats(hw, &adapter->stats,
4313 length, skb->data); 4310 length, skb->data);
4314 spin_unlock_irqrestore(&adapter->stats_lock, 4311 spin_unlock_irqrestore(&adapter->stats_lock,
4315 flags); 4312 flags);
4316 length--; 4313 length--;
4317 } else { 4314 } else {
4318 if (netdev->features & NETIF_F_RXALL) 4315 if (netdev->features & NETIF_F_RXALL)
@@ -4377,10 +4374,9 @@ next_desc:
4377 * @rx_ring: pointer to receive ring structure 4374 * @rx_ring: pointer to receive ring structure
4378 * @cleaned_count: number of buffers to allocate this pass 4375 * @cleaned_count: number of buffers to allocate this pass
4379 **/ 4376 **/
4380
4381static void 4377static void
4382e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, 4378e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4383 struct e1000_rx_ring *rx_ring, int cleaned_count) 4379 struct e1000_rx_ring *rx_ring, int cleaned_count)
4384{ 4380{
4385 struct net_device *netdev = adapter->netdev; 4381 struct net_device *netdev = adapter->netdev;
4386 struct pci_dev *pdev = adapter->pdev; 4382 struct pci_dev *pdev = adapter->pdev;
@@ -4421,7 +4417,7 @@ check_page:
4421 4417
4422 if (!buffer_info->dma) { 4418 if (!buffer_info->dma) {
4423 buffer_info->dma = dma_map_page(&pdev->dev, 4419 buffer_info->dma = dma_map_page(&pdev->dev,
4424 buffer_info->page, 0, 4420 buffer_info->page, 0,
4425 buffer_info->length, 4421 buffer_info->length,
4426 DMA_FROM_DEVICE); 4422 DMA_FROM_DEVICE);
4427 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4423 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
@@ -4451,7 +4447,8 @@ check_page:
4451 /* Force memory writes to complete before letting h/w 4447 /* Force memory writes to complete before letting h/w
4452 * know there are new descriptors to fetch. (Only 4448 * know there are new descriptors to fetch. (Only
4453 * applicable for weak-ordered memory model archs, 4449 * applicable for weak-ordered memory model archs,
4454 * such as IA-64). */ 4450 * such as IA-64).
4451 */
4455 wmb(); 4452 wmb();
4456 writel(i, adapter->hw.hw_addr + rx_ring->rdt); 4453 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4457 } 4454 }
@@ -4461,7 +4458,6 @@ check_page:
4461 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 4458 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4462 * @adapter: address of board private structure 4459 * @adapter: address of board private structure
4463 **/ 4460 **/
4464
4465static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 4461static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4466 struct e1000_rx_ring *rx_ring, 4462 struct e1000_rx_ring *rx_ring,
4467 int cleaned_count) 4463 int cleaned_count)
@@ -4532,8 +4528,7 @@ map_skb:
4532 break; /* while !buffer_info->skb */ 4528 break; /* while !buffer_info->skb */
4533 } 4529 }
4534 4530
4535 /* 4531 /* XXX if it was allocated cleanly it will never map to a
4536 * XXX if it was allocated cleanly it will never map to a
4537 * boundary crossing 4532 * boundary crossing
4538 */ 4533 */
4539 4534
@@ -4571,7 +4566,8 @@ map_skb:
4571 /* Force memory writes to complete before letting h/w 4566 /* Force memory writes to complete before letting h/w
4572 * know there are new descriptors to fetch. (Only 4567 * know there are new descriptors to fetch. (Only
4573 * applicable for weak-ordered memory model archs, 4568 * applicable for weak-ordered memory model archs,
4574 * such as IA-64). */ 4569 * such as IA-64).
4570 */
4575 wmb(); 4571 wmb();
4576 writel(i, hw->hw_addr + rx_ring->rdt); 4572 writel(i, hw->hw_addr + rx_ring->rdt);
4577 } 4573 }
@@ -4581,7 +4577,6 @@ map_skb:
4581 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 4577 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4582 * @adapter: 4578 * @adapter:
4583 **/ 4579 **/
4584
4585static void e1000_smartspeed(struct e1000_adapter *adapter) 4580static void e1000_smartspeed(struct e1000_adapter *adapter)
4586{ 4581{
4587 struct e1000_hw *hw = &adapter->hw; 4582 struct e1000_hw *hw = &adapter->hw;
@@ -4594,7 +4589,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
4594 4589
4595 if (adapter->smartspeed == 0) { 4590 if (adapter->smartspeed == 0) {
4596 /* If Master/Slave config fault is asserted twice, 4591 /* If Master/Slave config fault is asserted twice,
4597 * we assume back-to-back */ 4592 * we assume back-to-back
4593 */
4598 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4594 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4599 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4595 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4600 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4596 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
@@ -4607,7 +4603,7 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
4607 adapter->smartspeed++; 4603 adapter->smartspeed++;
4608 if (!e1000_phy_setup_autoneg(hw) && 4604 if (!e1000_phy_setup_autoneg(hw) &&
4609 !e1000_read_phy_reg(hw, PHY_CTRL, 4605 !e1000_read_phy_reg(hw, PHY_CTRL,
4610 &phy_ctrl)) { 4606 &phy_ctrl)) {
4611 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4607 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4612 MII_CR_RESTART_AUTO_NEG); 4608 MII_CR_RESTART_AUTO_NEG);
4613 e1000_write_phy_reg(hw, PHY_CTRL, 4609 e1000_write_phy_reg(hw, PHY_CTRL,
@@ -4638,7 +4634,6 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
4638 * @ifreq: 4634 * @ifreq:
4639 * @cmd: 4635 * @cmd:
4640 **/ 4636 **/
4641
4642static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 4637static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4643{ 4638{
4644 switch (cmd) { 4639 switch (cmd) {
@@ -4657,7 +4652,6 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4657 * @ifreq: 4652 * @ifreq:
4658 * @cmd: 4653 * @cmd:
4659 **/ 4654 **/
4660
4661static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 4655static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4662 int cmd) 4656 int cmd)
4663{ 4657{
@@ -4919,7 +4913,8 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4919 hw->autoneg = 0; 4913 hw->autoneg = 0;
4920 4914
4921 /* Make sure dplx is at most 1 bit and lsb of speed is not set 4915 /* Make sure dplx is at most 1 bit and lsb of speed is not set
4922 * for the switch() below to work */ 4916 * for the switch() below to work
4917 */
4923 if ((spd & 1) || (dplx & ~1)) 4918 if ((spd & 1) || (dplx & ~1))
4924 goto err_inval; 4919 goto err_inval;
4925 4920
@@ -5122,8 +5117,7 @@ static void e1000_shutdown(struct pci_dev *pdev)
5122} 5117}
5123 5118
5124#ifdef CONFIG_NET_POLL_CONTROLLER 5119#ifdef CONFIG_NET_POLL_CONTROLLER
5125/* 5120/* Polling 'interrupt' - used by things like netconsole to send skbs
5126 * Polling 'interrupt' - used by things like netconsole to send skbs
5127 * without having to re-enable interrupts. It's not called while 5121 * without having to re-enable interrupts. It's not called while
5128 * the interrupt routine is executing. 5122 * the interrupt routine is executing.
5129 */ 5123 */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 750fc0194f37..c9cde352b1c8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
267 * value exists, a default value is used. The final value is stored 267 * value exists, a default value is used. The final value is stored
268 * in a variable in the adapter structure. 268 * in a variable in the adapter structure.
269 **/ 269 **/
270
271void e1000_check_options(struct e1000_adapter *adapter) 270void e1000_check_options(struct e1000_adapter *adapter)
272{ 271{
273 struct e1000_option opt; 272 struct e1000_option opt;
@@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter)
319 .def = E1000_DEFAULT_RXD, 318 .def = E1000_DEFAULT_RXD,
320 .arg = { .r = { 319 .arg = { .r = {
321 .min = E1000_MIN_RXD, 320 .min = E1000_MIN_RXD,
322 .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD 321 .max = mac_type < e1000_82544 ? E1000_MAX_RXD :
322 E1000_MAX_82544_RXD
323 }} 323 }}
324 }; 324 };
325 325
@@ -408,7 +408,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
408 if (num_TxAbsIntDelay > bd) { 408 if (num_TxAbsIntDelay > bd) {
409 adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; 409 adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
410 e1000_validate_option(&adapter->tx_abs_int_delay, &opt, 410 e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
411 adapter); 411 adapter);
412 } else { 412 } else {
413 adapter->tx_abs_int_delay = opt.def; 413 adapter->tx_abs_int_delay = opt.def;
414 } 414 }
@@ -426,7 +426,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
426 if (num_RxIntDelay > bd) { 426 if (num_RxIntDelay > bd) {
427 adapter->rx_int_delay = RxIntDelay[bd]; 427 adapter->rx_int_delay = RxIntDelay[bd];
428 e1000_validate_option(&adapter->rx_int_delay, &opt, 428 e1000_validate_option(&adapter->rx_int_delay, &opt,
429 adapter); 429 adapter);
430 } else { 430 } else {
431 adapter->rx_int_delay = opt.def; 431 adapter->rx_int_delay = opt.def;
432 } 432 }
@@ -444,7 +444,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
444 if (num_RxAbsIntDelay > bd) { 444 if (num_RxAbsIntDelay > bd) {
445 adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; 445 adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
446 e1000_validate_option(&adapter->rx_abs_int_delay, &opt, 446 e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
447 adapter); 447 adapter);
448 } else { 448 } else {
449 adapter->rx_abs_int_delay = opt.def; 449 adapter->rx_abs_int_delay = opt.def;
450 } 450 }
@@ -479,16 +479,17 @@ void e1000_check_options(struct e1000_adapter *adapter)
479 break; 479 break;
480 case 4: 480 case 4:
481 e_dev_info("%s set to simplified " 481 e_dev_info("%s set to simplified "
482 "(2000-8000) ints mode\n", opt.name); 482 "(2000-8000) ints mode\n", opt.name);
483 adapter->itr_setting = adapter->itr; 483 adapter->itr_setting = adapter->itr;
484 break; 484 break;
485 default: 485 default:
486 e1000_validate_option(&adapter->itr, &opt, 486 e1000_validate_option(&adapter->itr, &opt,
487 adapter); 487 adapter);
488 /* save the setting, because the dynamic bits 488 /* save the setting, because the dynamic bits
489 * change itr. 489 * change itr.
490 * clear the lower two bits because they are 490 * clear the lower two bits because they are
491 * used as control */ 491 * used as control
492 */
492 adapter->itr_setting = adapter->itr & ~3; 493 adapter->itr_setting = adapter->itr & ~3;
493 break; 494 break;
494 } 495 }
@@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter)
533 * 534 *
534 * Handles speed and duplex options on fiber adapters 535 * Handles speed and duplex options on fiber adapters
535 **/ 536 **/
536
537static void e1000_check_fiber_options(struct e1000_adapter *adapter) 537static void e1000_check_fiber_options(struct e1000_adapter *adapter)
538{ 538{
539 int bd = adapter->bd_number; 539 int bd = adapter->bd_number;
@@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter)
559 * 559 *
560 * Handles speed and duplex options on copper adapters 560 * Handles speed and duplex options on copper adapters
561 **/ 561 **/
562
563static void e1000_check_copper_options(struct e1000_adapter *adapter) 562static void e1000_check_copper_options(struct e1000_adapter *adapter)
564{ 563{
565 struct e1000_option opt; 564 struct e1000_option opt;
@@ -681,22 +680,22 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
681 e_dev_info("Using Autonegotiation at Half Duplex only\n"); 680 e_dev_info("Using Autonegotiation at Half Duplex only\n");
682 adapter->hw.autoneg = adapter->fc_autoneg = 1; 681 adapter->hw.autoneg = adapter->fc_autoneg = 1;
683 adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | 682 adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
684 ADVERTISE_100_HALF; 683 ADVERTISE_100_HALF;
685 break; 684 break;
686 case FULL_DUPLEX: 685 case FULL_DUPLEX:
687 e_dev_info("Full Duplex specified without Speed\n"); 686 e_dev_info("Full Duplex specified without Speed\n");
688 e_dev_info("Using Autonegotiation at Full Duplex only\n"); 687 e_dev_info("Using Autonegotiation at Full Duplex only\n");
689 adapter->hw.autoneg = adapter->fc_autoneg = 1; 688 adapter->hw.autoneg = adapter->fc_autoneg = 1;
690 adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | 689 adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
691 ADVERTISE_100_FULL | 690 ADVERTISE_100_FULL |
692 ADVERTISE_1000_FULL; 691 ADVERTISE_1000_FULL;
693 break; 692 break;
694 case SPEED_10: 693 case SPEED_10:
695 e_dev_info("10 Mbps Speed specified without Duplex\n"); 694 e_dev_info("10 Mbps Speed specified without Duplex\n");
696 e_dev_info("Using Autonegotiation at 10 Mbps only\n"); 695 e_dev_info("Using Autonegotiation at 10 Mbps only\n");
697 adapter->hw.autoneg = adapter->fc_autoneg = 1; 696 adapter->hw.autoneg = adapter->fc_autoneg = 1;
698 adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | 697 adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
699 ADVERTISE_10_FULL; 698 ADVERTISE_10_FULL;
700 break; 699 break;
701 case SPEED_10 + HALF_DUPLEX: 700 case SPEED_10 + HALF_DUPLEX:
702 e_dev_info("Forcing to 10 Mbps Half Duplex\n"); 701 e_dev_info("Forcing to 10 Mbps Half Duplex\n");
@@ -715,7 +714,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
715 e_dev_info("Using Autonegotiation at 100 Mbps only\n"); 714 e_dev_info("Using Autonegotiation at 100 Mbps only\n");
716 adapter->hw.autoneg = adapter->fc_autoneg = 1; 715 adapter->hw.autoneg = adapter->fc_autoneg = 1;
717 adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | 716 adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
718 ADVERTISE_100_FULL; 717 ADVERTISE_100_FULL;
719 break; 718 break;
720 case SPEED_100 + HALF_DUPLEX: 719 case SPEED_100 + HALF_DUPLEX:
721 e_dev_info("Forcing to 100 Mbps Half Duplex\n"); 720 e_dev_info("Forcing to 100 Mbps Half Duplex\n");
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3f7dbd1cdb0f..a177b8b65c44 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4830,6 +4830,13 @@ static void e1000_watchdog_task(struct work_struct *work)
4830 &adapter->link_speed, 4830 &adapter->link_speed,
4831 &adapter->link_duplex); 4831 &adapter->link_duplex);
4832 e1000_print_link_info(adapter); 4832 e1000_print_link_info(adapter);
4833
4834 /* check if SmartSpeed worked */
4835 e1000e_check_downshift(hw);
4836 if (phy->speed_downgraded)
4837 netdev_warn(netdev,
4838 "Link Speed was downgraded by SmartSpeed\n");
4839
4833 /* On supported PHYs, check for duplex mismatch only 4840 /* On supported PHYs, check for duplex mismatch only
4834 * if link has autonegotiated at 10/100 half 4841 * if link has autonegotiated at 10/100 half
4835 */ 4842 */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 40b5d568d808..a3830a8ba4c1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1891,7 +1891,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
1891 } else { 1891 } else {
1892 hw->mac.ops.check_for_link(&adapter->hw); 1892 hw->mac.ops.check_for_link(&adapter->hw);
1893 if (hw->mac.autoneg) 1893 if (hw->mac.autoneg)
1894 msleep(4000); 1894 msleep(5000);
1895 1895
1896 if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) 1896 if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
1897 *data = 1; 1897 *data = 1;
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index fdca7b672776..a1463e3d14c0 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -127,8 +127,8 @@ struct igbvf_buffer {
127 /* Tx */ 127 /* Tx */
128 struct { 128 struct {
129 unsigned long time_stamp; 129 unsigned long time_stamp;
130 union e1000_adv_tx_desc *next_to_watch;
130 u16 length; 131 u16 length;
131 u16 next_to_watch;
132 u16 mapped_as_page; 132 u16 mapped_as_page;
133 }; 133 };
134 /* Rx */ 134 /* Rx */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index f53f7136e508..d60cd4393415 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -797,20 +797,31 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
797 struct sk_buff *skb; 797 struct sk_buff *skb;
798 union e1000_adv_tx_desc *tx_desc, *eop_desc; 798 union e1000_adv_tx_desc *tx_desc, *eop_desc;
799 unsigned int total_bytes = 0, total_packets = 0; 799 unsigned int total_bytes = 0, total_packets = 0;
800 unsigned int i, eop, count = 0; 800 unsigned int i, count = 0;
801 bool cleaned = false; 801 bool cleaned = false;
802 802
803 i = tx_ring->next_to_clean; 803 i = tx_ring->next_to_clean;
804 eop = tx_ring->buffer_info[i].next_to_watch; 804 buffer_info = &tx_ring->buffer_info[i];
805 eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); 805 eop_desc = buffer_info->next_to_watch;
806
807 do {
808 /* if next_to_watch is not set then there is no work pending */
809 if (!eop_desc)
810 break;
811
812 /* prevent any other reads prior to eop_desc */
813 read_barrier_depends();
814
815 /* if DD is not set pending work has not been completed */
816 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
817 break;
818
819 /* clear next_to_watch to prevent false hangs */
820 buffer_info->next_to_watch = NULL;
806 821
807 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
808 (count < tx_ring->count)) {
809 rmb(); /* read buffer_info after eop_desc status */
810 for (cleaned = false; !cleaned; count++) { 822 for (cleaned = false; !cleaned; count++) {
811 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); 823 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
812 buffer_info = &tx_ring->buffer_info[i]; 824 cleaned = (tx_desc == eop_desc);
813 cleaned = (i == eop);
814 skb = buffer_info->skb; 825 skb = buffer_info->skb;
815 826
816 if (skb) { 827 if (skb) {
@@ -831,10 +842,12 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
831 i++; 842 i++;
832 if (i == tx_ring->count) 843 if (i == tx_ring->count)
833 i = 0; 844 i = 0;
845
846 buffer_info = &tx_ring->buffer_info[i];
834 } 847 }
835 eop = tx_ring->buffer_info[i].next_to_watch; 848
836 eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); 849 eop_desc = buffer_info->next_to_watch;
837 } 850 } while (count < tx_ring->count);
838 851
839 tx_ring->next_to_clean = i; 852 tx_ring->next_to_clean = i;
840 853
@@ -1961,7 +1974,6 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1961 context_desc->seqnum_seed = 0; 1974 context_desc->seqnum_seed = 0;
1962 1975
1963 buffer_info->time_stamp = jiffies; 1976 buffer_info->time_stamp = jiffies;
1964 buffer_info->next_to_watch = i;
1965 buffer_info->dma = 0; 1977 buffer_info->dma = 0;
1966 i++; 1978 i++;
1967 if (i == tx_ring->count) 1979 if (i == tx_ring->count)
@@ -2021,7 +2033,6 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
2021 context_desc->mss_l4len_idx = 0; 2033 context_desc->mss_l4len_idx = 0;
2022 2034
2023 buffer_info->time_stamp = jiffies; 2035 buffer_info->time_stamp = jiffies;
2024 buffer_info->next_to_watch = i;
2025 buffer_info->dma = 0; 2036 buffer_info->dma = 0;
2026 i++; 2037 i++;
2027 if (i == tx_ring->count) 2038 if (i == tx_ring->count)
@@ -2061,8 +2072,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
2061 2072
2062static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, 2073static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2063 struct igbvf_ring *tx_ring, 2074 struct igbvf_ring *tx_ring,
2064 struct sk_buff *skb, 2075 struct sk_buff *skb)
2065 unsigned int first)
2066{ 2076{
2067 struct igbvf_buffer *buffer_info; 2077 struct igbvf_buffer *buffer_info;
2068 struct pci_dev *pdev = adapter->pdev; 2078 struct pci_dev *pdev = adapter->pdev;
@@ -2077,7 +2087,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2077 buffer_info->length = len; 2087 buffer_info->length = len;
2078 /* set time_stamp *before* dma to help avoid a possible race */ 2088 /* set time_stamp *before* dma to help avoid a possible race */
2079 buffer_info->time_stamp = jiffies; 2089 buffer_info->time_stamp = jiffies;
2080 buffer_info->next_to_watch = i;
2081 buffer_info->mapped_as_page = false; 2090 buffer_info->mapped_as_page = false;
2082 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len, 2091 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
2083 DMA_TO_DEVICE); 2092 DMA_TO_DEVICE);
@@ -2100,7 +2109,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2100 BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); 2109 BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2101 buffer_info->length = len; 2110 buffer_info->length = len;
2102 buffer_info->time_stamp = jiffies; 2111 buffer_info->time_stamp = jiffies;
2103 buffer_info->next_to_watch = i;
2104 buffer_info->mapped_as_page = true; 2112 buffer_info->mapped_as_page = true;
2105 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, 2113 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
2106 DMA_TO_DEVICE); 2114 DMA_TO_DEVICE);
@@ -2109,7 +2117,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2109 } 2117 }
2110 2118
2111 tx_ring->buffer_info[i].skb = skb; 2119 tx_ring->buffer_info[i].skb = skb;
2112 tx_ring->buffer_info[first].next_to_watch = i;
2113 2120
2114 return ++count; 2121 return ++count;
2115 2122
@@ -2120,7 +2127,6 @@ dma_error:
2120 buffer_info->dma = 0; 2127 buffer_info->dma = 0;
2121 buffer_info->time_stamp = 0; 2128 buffer_info->time_stamp = 0;
2122 buffer_info->length = 0; 2129 buffer_info->length = 0;
2123 buffer_info->next_to_watch = 0;
2124 buffer_info->mapped_as_page = false; 2130 buffer_info->mapped_as_page = false;
2125 if (count) 2131 if (count)
2126 count--; 2132 count--;
@@ -2139,7 +2145,8 @@ dma_error:
2139 2145
2140static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, 2146static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
2141 struct igbvf_ring *tx_ring, 2147 struct igbvf_ring *tx_ring,
2142 int tx_flags, int count, u32 paylen, 2148 int tx_flags, int count,
2149 unsigned int first, u32 paylen,
2143 u8 hdr_len) 2150 u8 hdr_len)
2144{ 2151{
2145 union e1000_adv_tx_desc *tx_desc = NULL; 2152 union e1000_adv_tx_desc *tx_desc = NULL;
@@ -2189,6 +2196,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
2189 * such as IA-64). */ 2196 * such as IA-64). */
2190 wmb(); 2197 wmb();
2191 2198
2199 tx_ring->buffer_info[first].next_to_watch = tx_desc;
2192 tx_ring->next_to_use = i; 2200 tx_ring->next_to_use = i;
2193 writel(i, adapter->hw.hw_addr + tx_ring->tail); 2201 writel(i, adapter->hw.hw_addr + tx_ring->tail);
2194 /* we need this if more than one processor can write to our tail 2202 /* we need this if more than one processor can write to our tail
@@ -2255,11 +2263,11 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2255 * count reflects descriptors mapped, if 0 then mapping error 2263 * count reflects descriptors mapped, if 0 then mapping error
2256 * has occurred and we need to rewind the descriptor queue 2264 * has occurred and we need to rewind the descriptor queue
2257 */ 2265 */
2258 count = igbvf_tx_map_adv(adapter, tx_ring, skb, first); 2266 count = igbvf_tx_map_adv(adapter, tx_ring, skb);
2259 2267
2260 if (count) { 2268 if (count) {
2261 igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count, 2269 igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
2262 skb->len, hdr_len); 2270 first, skb->len, hdr_len);
2263 /* Make sure there is space in the ring for the next send. */ 2271 /* Make sure there is space in the ring for the next send. */
2264 igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); 2272 igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
2265 } else { 2273 } else {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index d268c7b222c7..a8e10cff7a89 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -156,7 +156,7 @@ struct vf_macvlans {
156 156
157/* Tx Descriptors needed, worst case */ 157/* Tx Descriptors needed, worst case */
158#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) 158#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
159#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) 159#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
160 160
161/* wrapper around a pointer to a socket buffer, 161/* wrapper around a pointer to a socket buffer,
162 * so a DMA handle can be stored along with the buffer */ 162 * so a DMA handle can be stored along with the buffer */
@@ -201,6 +201,7 @@ struct ixgbe_rx_queue_stats {
201 201
202enum ixgbe_ring_state_t { 202enum ixgbe_ring_state_t {
203 __IXGBE_TX_FDIR_INIT_DONE, 203 __IXGBE_TX_FDIR_INIT_DONE,
204 __IXGBE_TX_XPS_INIT_DONE,
204 __IXGBE_TX_DETECT_HANG, 205 __IXGBE_TX_DETECT_HANG,
205 __IXGBE_HANG_CHECK_ARMED, 206 __IXGBE_HANG_CHECK_ARMED,
206 __IXGBE_RX_RSC_ENABLED, 207 __IXGBE_RX_RSC_ENABLED,
@@ -278,15 +279,10 @@ enum ixgbe_ring_f_enum {
278 279
279#define IXGBE_MAX_RSS_INDICES 16 280#define IXGBE_MAX_RSS_INDICES 16
280#define IXGBE_MAX_VMDQ_INDICES 64 281#define IXGBE_MAX_VMDQ_INDICES 64
281#define IXGBE_MAX_FDIR_INDICES 64 282#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
282#ifdef IXGBE_FCOE
283#define IXGBE_MAX_FCOE_INDICES 8 283#define IXGBE_MAX_FCOE_INDICES 8
284#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) 284#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
285#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) 285#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
286#else
287#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
288#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
289#endif /* IXGBE_FCOE */
290struct ixgbe_ring_feature { 286struct ixgbe_ring_feature {
291 u16 limit; /* upper limit on feature indices */ 287 u16 limit; /* upper limit on feature indices */
292 u16 indices; /* current value of indices */ 288 u16 indices; /* current value of indices */
@@ -624,6 +620,7 @@ enum ixgbe_state_t {
624 __IXGBE_DOWN, 620 __IXGBE_DOWN,
625 __IXGBE_SERVICE_SCHED, 621 __IXGBE_SERVICE_SCHED,
626 __IXGBE_IN_SFP_INIT, 622 __IXGBE_IN_SFP_INIT,
623 __IXGBE_READ_I2C,
627}; 624};
628 625
629struct ixgbe_cb { 626struct ixgbe_cb {
@@ -704,8 +701,8 @@ extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
704extern void ixgbe_set_rx_mode(struct net_device *netdev); 701extern void ixgbe_set_rx_mode(struct net_device *netdev);
705#ifdef CONFIG_IXGBE_DCB 702#ifdef CONFIG_IXGBE_DCB
706extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); 703extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
707extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
708#endif 704#endif
705extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
709extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); 706extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
710extern void ixgbe_do_reset(struct net_device *netdev); 707extern void ixgbe_do_reset(struct net_device *netdev);
711#ifdef CONFIG_IXGBE_HWMON 708#ifdef CONFIG_IXGBE_HWMON
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index db6735931d66..f4d2e9e3c6d5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -39,6 +39,7 @@
39#include <linux/uaccess.h> 39#include <linux/uaccess.h>
40 40
41#include "ixgbe.h" 41#include "ixgbe.h"
42#include "ixgbe_phy.h"
42 43
43 44
44#define IXGBE_ALL_RAR_ENTRIES 16 45#define IXGBE_ALL_RAR_ENTRIES 16
@@ -2112,13 +2113,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2112 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2113 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2113 struct ixgbe_q_vector *q_vector; 2114 struct ixgbe_q_vector *q_vector;
2114 int i; 2115 int i;
2115 u16 tx_itr_param, rx_itr_param; 2116 u16 tx_itr_param, rx_itr_param, tx_itr_prev;
2116 bool need_reset = false; 2117 bool need_reset = false;
2117 2118
2118 /* don't accept tx specific changes if we've got mixed RxTx vectors */ 2119 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
2119 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count 2120 /* reject Tx specific changes in case of mixed RxTx vectors */
2120 && ec->tx_coalesce_usecs) 2121 if (ec->tx_coalesce_usecs)
2121 return -EINVAL; 2122 return -EINVAL;
2123 tx_itr_prev = adapter->rx_itr_setting;
2124 } else {
2125 tx_itr_prev = adapter->tx_itr_setting;
2126 }
2122 2127
2123 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || 2128 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
2124 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) 2129 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
@@ -2144,8 +2149,25 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2144 else 2149 else
2145 tx_itr_param = adapter->tx_itr_setting; 2150 tx_itr_param = adapter->tx_itr_setting;
2146 2151
2152 /* mixed Rx/Tx */
2153 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2154 adapter->tx_itr_setting = adapter->rx_itr_setting;
2155
2156#if IS_ENABLED(CONFIG_BQL)
2157 /* detect ITR changes that require update of TXDCTL.WTHRESH */
2158 if ((adapter->tx_itr_setting > 1) &&
2159 (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
2160 if ((tx_itr_prev == 1) ||
2161 (tx_itr_prev > IXGBE_100K_ITR))
2162 need_reset = true;
2163 } else {
2164 if ((tx_itr_prev > 1) &&
2165 (tx_itr_prev < IXGBE_100K_ITR))
2166 need_reset = true;
2167 }
2168#endif
2147 /* check the old value and enable RSC if necessary */ 2169 /* check the old value and enable RSC if necessary */
2148 need_reset = ixgbe_update_rsc(adapter); 2170 need_reset |= ixgbe_update_rsc(adapter);
2149 2171
2150 for (i = 0; i < adapter->num_q_vectors; i++) { 2172 for (i = 0; i < adapter->num_q_vectors; i++) {
2151 q_vector = adapter->q_vector[i]; 2173 q_vector = adapter->q_vector[i];
@@ -2731,6 +2753,225 @@ static int ixgbe_get_ts_info(struct net_device *dev,
2731 return 0; 2753 return 0;
2732} 2754}
2733 2755
2756static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
2757{
2758 unsigned int max_combined;
2759 u8 tcs = netdev_get_num_tc(adapter->netdev);
2760
2761 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2762 /* We only support one q_vector without MSI-X */
2763 max_combined = 1;
2764 } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2765 /* SR-IOV currently only allows one queue on the PF */
2766 max_combined = 1;
2767 } else if (tcs > 1) {
2768 /* For DCB report channels per traffic class */
2769 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2770 /* 8 TC w/ 4 queues per TC */
2771 max_combined = 4;
2772 } else if (tcs > 4) {
2773 /* 8 TC w/ 8 queues per TC */
2774 max_combined = 8;
2775 } else {
2776 /* 4 TC w/ 16 queues per TC */
2777 max_combined = 16;
2778 }
2779 } else if (adapter->atr_sample_rate) {
2780 /* support up to 64 queues with ATR */
2781 max_combined = IXGBE_MAX_FDIR_INDICES;
2782 } else {
2783 /* support up to 16 queues with RSS */
2784 max_combined = IXGBE_MAX_RSS_INDICES;
2785 }
2786
2787 return max_combined;
2788}
2789
2790static void ixgbe_get_channels(struct net_device *dev,
2791 struct ethtool_channels *ch)
2792{
2793 struct ixgbe_adapter *adapter = netdev_priv(dev);
2794
2795 /* report maximum channels */
2796 ch->max_combined = ixgbe_max_channels(adapter);
2797
2798 /* report info for other vector */
2799 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2800 ch->max_other = NON_Q_VECTORS;
2801 ch->other_count = NON_Q_VECTORS;
2802 }
2803
2804 /* record RSS queues */
2805 ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
2806
2807 /* nothing else to report if RSS is disabled */
2808 if (ch->combined_count == 1)
2809 return;
2810
2811 /* we do not support ATR queueing if SR-IOV is enabled */
2812 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
2813 return;
2814
2815 /* same thing goes for being DCB enabled */
2816 if (netdev_get_num_tc(dev) > 1)
2817 return;
2818
2819 /* if ATR is disabled we can exit */
2820 if (!adapter->atr_sample_rate)
2821 return;
2822
2823 /* report flow director queues as maximum channels */
2824 ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
2825}
2826
2827static int ixgbe_set_channels(struct net_device *dev,
2828 struct ethtool_channels *ch)
2829{
2830 struct ixgbe_adapter *adapter = netdev_priv(dev);
2831 unsigned int count = ch->combined_count;
2832
2833 /* verify they are not requesting separate vectors */
2834 if (!count || ch->rx_count || ch->tx_count)
2835 return -EINVAL;
2836
2837 /* verify other_count has not changed */
2838 if (ch->other_count != NON_Q_VECTORS)
2839 return -EINVAL;
2840
2841 /* verify the number of channels does not exceed hardware limits */
2842 if (count > ixgbe_max_channels(adapter))
2843 return -EINVAL;
2844
2845 /* update feature limits from largest to smallest supported values */
2846 adapter->ring_feature[RING_F_FDIR].limit = count;
2847
2848 /* cap RSS limit at 16 */
2849 if (count > IXGBE_MAX_RSS_INDICES)
2850 count = IXGBE_MAX_RSS_INDICES;
2851 adapter->ring_feature[RING_F_RSS].limit = count;
2852
2853#ifdef IXGBE_FCOE
2854 /* cap FCoE limit at 8 */
2855 if (count > IXGBE_FCRETA_SIZE)
2856 count = IXGBE_FCRETA_SIZE;
2857 adapter->ring_feature[RING_F_FCOE].limit = count;
2858
2859#endif
2860 /* use setup TC to update any traffic class queue mapping */
2861 return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
2862}
2863
2864static int ixgbe_get_module_info(struct net_device *dev,
2865 struct ethtool_modinfo *modinfo)
2866{
2867 struct ixgbe_adapter *adapter = netdev_priv(dev);
2868 struct ixgbe_hw *hw = &adapter->hw;
2869 u32 status;
2870 u8 sff8472_rev, addr_mode;
2871 int ret_val = 0;
2872 bool page_swap = false;
2873
2874 /* avoid concurent i2c reads */
2875 while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
2876 msleep(100);
2877
2878 /* used by the service task */
2879 set_bit(__IXGBE_READ_I2C, &adapter->state);
2880
2881 /* Check whether we support SFF-8472 or not */
2882 status = hw->phy.ops.read_i2c_eeprom(hw,
2883 IXGBE_SFF_SFF_8472_COMP,
2884 &sff8472_rev);
2885 if (status != 0) {
2886 ret_val = -EIO;
2887 goto err_out;
2888 }
2889
2890 /* addressing mode is not supported */
2891 status = hw->phy.ops.read_i2c_eeprom(hw,
2892 IXGBE_SFF_SFF_8472_SWAP,
2893 &addr_mode);
2894 if (status != 0) {
2895 ret_val = -EIO;
2896 goto err_out;
2897 }
2898
2899 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
2900 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
2901 page_swap = true;
2902 }
2903
2904 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
2905 /* We have a SFP, but it does not support SFF-8472 */
2906 modinfo->type = ETH_MODULE_SFF_8079;
2907 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
2908 } else {
2909 /* We have a SFP which supports a revision of SFF-8472. */
2910 modinfo->type = ETH_MODULE_SFF_8472;
2911 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2912 }
2913
2914err_out:
2915 clear_bit(__IXGBE_READ_I2C, &adapter->state);
2916 return ret_val;
2917}
2918
2919static int ixgbe_get_module_eeprom(struct net_device *dev,
2920 struct ethtool_eeprom *ee,
2921 u8 *data)
2922{
2923 struct ixgbe_adapter *adapter = netdev_priv(dev);
2924 struct ixgbe_hw *hw = &adapter->hw;
2925 u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2926 u8 databyte = 0xFF;
2927 int i = 0;
2928 int ret_val = 0;
2929
2930 /* ixgbe_get_module_info is called before this function in all
2931 * cases, so we do not need any checks we already do above,
2932 * and can trust ee->len to be a known value.
2933 */
2934
2935 while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
2936 msleep(100);
2937 set_bit(__IXGBE_READ_I2C, &adapter->state);
2938
2939 /* Read the first block, SFF-8079 */
2940 for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
2941 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
2942 if (status != 0) {
2943 /* Error occured while reading module */
2944 ret_val = -EIO;
2945 goto err_out;
2946 }
2947 data[i] = databyte;
2948 }
2949
2950 /* If the second block is requested, check if SFF-8472 is supported. */
2951 if (ee->len == ETH_MODULE_SFF_8472_LEN) {
2952 if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
2953 return -EOPNOTSUPP;
2954
2955 /* Read the second block, SFF-8472 */
2956 for (i = ETH_MODULE_SFF_8079_LEN;
2957 i < ETH_MODULE_SFF_8472_LEN; i++) {
2958 status = hw->phy.ops.read_i2c_sff8472(hw,
2959 i - ETH_MODULE_SFF_8079_LEN, &databyte);
2960 if (status != 0) {
2961 /* Error occured while reading module */
2962 ret_val = -EIO;
2963 goto err_out;
2964 }
2965 data[i] = databyte;
2966 }
2967 }
2968
2969err_out:
2970 clear_bit(__IXGBE_READ_I2C, &adapter->state);
2971
2972 return ret_val;
2973}
2974
2734static const struct ethtool_ops ixgbe_ethtool_ops = { 2975static const struct ethtool_ops ixgbe_ethtool_ops = {
2735 .get_settings = ixgbe_get_settings, 2976 .get_settings = ixgbe_get_settings,
2736 .set_settings = ixgbe_set_settings, 2977 .set_settings = ixgbe_set_settings,
@@ -2759,7 +3000,11 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
2759 .set_coalesce = ixgbe_set_coalesce, 3000 .set_coalesce = ixgbe_set_coalesce,
2760 .get_rxnfc = ixgbe_get_rxnfc, 3001 .get_rxnfc = ixgbe_get_rxnfc,
2761 .set_rxnfc = ixgbe_set_rxnfc, 3002 .set_rxnfc = ixgbe_set_rxnfc,
3003 .get_channels = ixgbe_get_channels,
3004 .set_channels = ixgbe_set_channels,
2762 .get_ts_info = ixgbe_get_ts_info, 3005 .get_ts_info = ixgbe_get_ts_info,
3006 .get_module_info = ixgbe_get_module_info,
3007 .get_module_eeprom = ixgbe_get_module_eeprom,
2763}; 3008};
2764 3009
2765void ixgbe_set_ethtool_ops(struct net_device *netdev) 3010void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 9ffa3309e54d..ef5f7a678ce1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -386,7 +386,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
386 fcoe = &adapter->ring_feature[RING_F_FCOE]; 386 fcoe = &adapter->ring_feature[RING_F_FCOE];
387 387
388 /* limit ourselves based on feature limits */ 388 /* limit ourselves based on feature limits */
389 fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
390 fcoe_i = min_t(u16, fcoe_i, fcoe->limit); 389 fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
391 390
392 if (fcoe_i) { 391 if (fcoe_i) {
@@ -562,9 +561,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
562 fcoe_i = min_t(u16, fcoe_i, fcoe->limit); 561 fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
563 562
564 if (vmdq_i > 1 && fcoe_i) { 563 if (vmdq_i > 1 && fcoe_i) {
565 /* reserve no more than number of CPUs */
566 fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
567
568 /* alloc queues for FCoE separately */ 564 /* alloc queues for FCoE separately */
569 fcoe->indices = fcoe_i; 565 fcoe->indices = fcoe_i;
570 fcoe->offset = vmdq_i * rss_i; 566 fcoe->offset = vmdq_i * rss_i;
@@ -623,8 +619,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
623 if (rss_i > 1 && adapter->atr_sample_rate) { 619 if (rss_i > 1 && adapter->atr_sample_rate) {
624 f = &adapter->ring_feature[RING_F_FDIR]; 620 f = &adapter->ring_feature[RING_F_FDIR];
625 621
626 f->indices = min_t(u16, num_online_cpus(), f->limit); 622 rss_i = f->indices = f->limit;
627 rss_i = max_t(u16, rss_i, f->indices);
628 623
629 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 624 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
630 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 625 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -776,19 +771,23 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
776{ 771{
777 struct ixgbe_q_vector *q_vector; 772 struct ixgbe_q_vector *q_vector;
778 struct ixgbe_ring *ring; 773 struct ixgbe_ring *ring;
779 int node = -1; 774 int node = NUMA_NO_NODE;
780 int cpu = -1; 775 int cpu = -1;
781 int ring_count, size; 776 int ring_count, size;
777 u8 tcs = netdev_get_num_tc(adapter->netdev);
782 778
783 ring_count = txr_count + rxr_count; 779 ring_count = txr_count + rxr_count;
784 size = sizeof(struct ixgbe_q_vector) + 780 size = sizeof(struct ixgbe_q_vector) +
785 (sizeof(struct ixgbe_ring) * ring_count); 781 (sizeof(struct ixgbe_ring) * ring_count);
786 782
787 /* customize cpu for Flow Director mapping */ 783 /* customize cpu for Flow Director mapping */
788 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 784 if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
789 if (cpu_online(v_idx)) { 785 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
790 cpu = v_idx; 786 if (rss_i > 1 && adapter->atr_sample_rate) {
791 node = cpu_to_node(cpu); 787 if (cpu_online(v_idx)) {
788 cpu = v_idx;
789 node = cpu_to_node(cpu);
790 }
792 } 791 }
793 } 792 }
794 793
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 1d5e093e988a..68478d6dfa2d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2786,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2786 2786
2787 /* 2787 /*
2788 * set WTHRESH to encourage burst writeback, it should not be set 2788 * set WTHRESH to encourage burst writeback, it should not be set
2789 * higher than 1 when ITR is 0 as it could cause false TX hangs 2789 * higher than 1 when:
2790 * - ITR is 0 as it could cause false TX hangs
2791 * - ITR is set to > 100k int/sec and BQL is enabled
2790 * 2792 *
2791 * In order to avoid issues WTHRESH + PTHRESH should always be equal 2793 * In order to avoid issues WTHRESH + PTHRESH should always be equal
2792 * to or less than the number of on chip descriptors, which is 2794 * to or less than the number of on chip descriptors, which is
2793 * currently 40. 2795 * currently 40.
2794 */ 2796 */
2797#if IS_ENABLED(CONFIG_BQL)
2798 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
2799#else
2795 if (!ring->q_vector || (ring->q_vector->itr < 8)) 2800 if (!ring->q_vector || (ring->q_vector->itr < 8))
2801#endif
2796 txdctl |= (1 << 16); /* WTHRESH = 1 */ 2802 txdctl |= (1 << 16); /* WTHRESH = 1 */
2797 else 2803 else
2798 txdctl |= (8 << 16); /* WTHRESH = 8 */ 2804 txdctl |= (8 << 16); /* WTHRESH = 8 */
@@ -2813,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2813 ring->atr_sample_rate = 0; 2819 ring->atr_sample_rate = 0;
2814 } 2820 }
2815 2821
2822 /* initialize XPS */
2823 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
2824 struct ixgbe_q_vector *q_vector = ring->q_vector;
2825
2826 if (q_vector)
2827 netif_set_xps_queue(adapter->netdev,
2828 &q_vector->affinity_mask,
2829 ring->queue_index);
2830 }
2831
2816 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); 2832 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
2817 2833
2818 /* enable queue */ 2834 /* enable queue */
@@ -4465,7 +4481,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
4465{ 4481{
4466 struct ixgbe_hw *hw = &adapter->hw; 4482 struct ixgbe_hw *hw = &adapter->hw;
4467 struct pci_dev *pdev = adapter->pdev; 4483 struct pci_dev *pdev = adapter->pdev;
4468 unsigned int rss; 4484 unsigned int rss, fdir;
4469 u32 fwsm; 4485 u32 fwsm;
4470#ifdef CONFIG_IXGBE_DCB 4486#ifdef CONFIG_IXGBE_DCB
4471 int j; 4487 int j;
@@ -4485,9 +4501,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
4485 adapter->ring_feature[RING_F_RSS].limit = rss; 4501 adapter->ring_feature[RING_F_RSS].limit = rss;
4486 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 4502 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4487 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 4503 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
4488 adapter->ring_feature[RING_F_FDIR].limit = IXGBE_MAX_FDIR_INDICES;
4489 adapter->max_q_vectors = MAX_Q_VECTORS_82599; 4504 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
4490 adapter->atr_sample_rate = 20; 4505 adapter->atr_sample_rate = 20;
4506 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
4507 adapter->ring_feature[RING_F_FDIR].limit = fdir;
4491 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; 4508 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
4492#ifdef CONFIG_IXGBE_DCA 4509#ifdef CONFIG_IXGBE_DCA
4493 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; 4510 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
@@ -5698,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
5698 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) 5715 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
5699 return; 5716 return;
5700 5717
5718 /* concurent i2c reads are not supported */
5719 if (test_bit(__IXGBE_READ_I2C, &adapter->state))
5720 return;
5721
5701 /* someone else is in init, wait until next service event */ 5722 /* someone else is in init, wait until next service event */
5702 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) 5723 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5703 return; 5724 return;
@@ -6363,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6363 return __ixgbe_maybe_stop_tx(tx_ring, size); 6384 return __ixgbe_maybe_stop_tx(tx_ring, size);
6364} 6385}
6365 6386
6387#ifdef IXGBE_FCOE
6366static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) 6388static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6367{ 6389{
6368 struct ixgbe_adapter *adapter = netdev_priv(dev); 6390 struct ixgbe_adapter *adapter;
6369 int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 6391 struct ixgbe_ring_feature *f;
6370 smp_processor_id(); 6392 int txq;
6371#ifdef IXGBE_FCOE
6372 __be16 protocol = vlan_get_protocol(skb);
6373 6393
6374 if (((protocol == htons(ETH_P_FCOE)) || 6394 /*
6375 (protocol == htons(ETH_P_FIP))) && 6395 * only execute the code below if protocol is FCoE
6376 (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { 6396 * or FIP and we have FCoE enabled on the adapter
6377 struct ixgbe_ring_feature *f; 6397 */
6398 switch (vlan_get_protocol(skb)) {
6399 case __constant_htons(ETH_P_FCOE):
6400 case __constant_htons(ETH_P_FIP):
6401 adapter = netdev_priv(dev);
6378 6402
6379 f = &adapter->ring_feature[RING_F_FCOE]; 6403 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
6404 break;
6405 default:
6406 return __netdev_pick_tx(dev, skb);
6407 }
6380 6408
6381 while (txq >= f->indices) 6409 f = &adapter->ring_feature[RING_F_FCOE];
6382 txq -= f->indices;
6383 txq += adapter->ring_feature[RING_F_FCOE].offset;
6384 6410
6385 return txq; 6411 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
6386 } 6412 smp_processor_id();
6387#endif
6388 6413
6389 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 6414 while (txq >= f->indices)
6390 while (unlikely(txq >= dev->real_num_tx_queues)) 6415 txq -= f->indices;
6391 txq -= dev->real_num_tx_queues;
6392 return txq;
6393 }
6394 6416
6395 return skb_tx_hash(dev, skb); 6417 return txq + f->offset;
6396} 6418}
6397 6419
6420#endif
6398netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, 6421netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6399 struct ixgbe_adapter *adapter, 6422 struct ixgbe_adapter *adapter,
6400 struct ixgbe_ring *tx_ring) 6423 struct ixgbe_ring *tx_ring)
@@ -6799,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
6799 } 6822 }
6800} 6823}
6801 6824
6825#endif /* CONFIG_IXGBE_DCB */
6802/** 6826/**
6803 * ixgbe_setup_tc - configure net_device for multiple traffic classes 6827 * ixgbe_setup_tc - configure net_device for multiple traffic classes
6804 * 6828 *
@@ -6824,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
6824 ixgbe_close(dev); 6848 ixgbe_close(dev);
6825 ixgbe_clear_interrupt_scheme(adapter); 6849 ixgbe_clear_interrupt_scheme(adapter);
6826 6850
6851#ifdef CONFIG_IXGBE_DCB
6827 if (tc) { 6852 if (tc) {
6828 netdev_set_num_tc(dev, tc); 6853 netdev_set_num_tc(dev, tc);
6829 ixgbe_set_prio_tc_map(adapter); 6854 ixgbe_set_prio_tc_map(adapter);
@@ -6846,31 +6871,24 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
6846 adapter->dcb_cfg.pfc_mode_enable = false; 6871 adapter->dcb_cfg.pfc_mode_enable = false;
6847 } 6872 }
6848 6873
6849 ixgbe_init_interrupt_scheme(adapter);
6850 ixgbe_validate_rtr(adapter, tc); 6874 ixgbe_validate_rtr(adapter, tc);
6875
6876#endif /* CONFIG_IXGBE_DCB */
6877 ixgbe_init_interrupt_scheme(adapter);
6878
6851 if (netif_running(dev)) 6879 if (netif_running(dev))
6852 ixgbe_open(dev); 6880 return ixgbe_open(dev);
6853 6881
6854 return 0; 6882 return 0;
6855} 6883}
6856 6884
6857#endif /* CONFIG_IXGBE_DCB */
6858#ifdef CONFIG_PCI_IOV 6885#ifdef CONFIG_PCI_IOV
6859void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) 6886void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
6860{ 6887{
6861 struct net_device *netdev = adapter->netdev; 6888 struct net_device *netdev = adapter->netdev;
6862 6889
6863 rtnl_lock(); 6890 rtnl_lock();
6864#ifdef CONFIG_IXGBE_DCB
6865 ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); 6891 ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
6866#else
6867 if (netif_running(netdev))
6868 ixgbe_close(netdev);
6869 ixgbe_clear_interrupt_scheme(adapter);
6870 ixgbe_init_interrupt_scheme(adapter);
6871 if (netif_running(netdev))
6872 ixgbe_open(netdev);
6873#endif
6874 rtnl_unlock(); 6892 rtnl_unlock();
6875} 6893}
6876 6894
@@ -7118,7 +7136,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7118 .ndo_open = ixgbe_open, 7136 .ndo_open = ixgbe_open,
7119 .ndo_stop = ixgbe_close, 7137 .ndo_stop = ixgbe_close,
7120 .ndo_start_xmit = ixgbe_xmit_frame, 7138 .ndo_start_xmit = ixgbe_xmit_frame,
7139#ifdef IXGBE_FCOE
7121 .ndo_select_queue = ixgbe_select_queue, 7140 .ndo_select_queue = ixgbe_select_queue,
7141#endif
7122 .ndo_set_rx_mode = ixgbe_set_rx_mode, 7142 .ndo_set_rx_mode = ixgbe_set_rx_mode,
7123 .ndo_validate_addr = eth_validate_addr, 7143 .ndo_validate_addr = eth_validate_addr,
7124 .ndo_set_mac_address = ixgbe_set_mac, 7144 .ndo_set_mac_address = ixgbe_set_mac,
@@ -7230,9 +7250,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7230 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 7250 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
7231 static int cards_found; 7251 static int cards_found;
7232 int i, err, pci_using_dac; 7252 int i, err, pci_using_dac;
7253 unsigned int indices = MAX_TX_QUEUES;
7233 u8 part_str[IXGBE_PBANUM_LENGTH]; 7254 u8 part_str[IXGBE_PBANUM_LENGTH];
7234 unsigned int indices = num_possible_cpus();
7235 unsigned int dcb_max = 0;
7236#ifdef IXGBE_FCOE 7255#ifdef IXGBE_FCOE
7237 u16 device_caps; 7256 u16 device_caps;
7238#endif 7257#endif
@@ -7281,25 +7300,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7281 pci_set_master(pdev); 7300 pci_set_master(pdev);
7282 pci_save_state(pdev); 7301 pci_save_state(pdev);
7283 7302
7303 if (ii->mac == ixgbe_mac_82598EB) {
7284#ifdef CONFIG_IXGBE_DCB 7304#ifdef CONFIG_IXGBE_DCB
7285 if (ii->mac == ixgbe_mac_82598EB) 7305 /* 8 TC w/ 4 queues per TC */
7286 dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, 7306 indices = 4 * MAX_TRAFFIC_CLASS;
7287 IXGBE_MAX_RSS_INDICES); 7307#else
7288 else 7308 indices = IXGBE_MAX_RSS_INDICES;
7289 dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
7290 IXGBE_MAX_FDIR_INDICES);
7291#endif 7309#endif
7310 }
7292 7311
7293 if (ii->mac == ixgbe_mac_82598EB)
7294 indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
7295 else
7296 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
7297
7298#ifdef IXGBE_FCOE
7299 indices += min_t(unsigned int, num_possible_cpus(),
7300 IXGBE_MAX_FCOE_INDICES);
7301#endif
7302 indices = max_t(unsigned int, dcb_max, indices);
7303 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); 7312 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
7304 if (!netdev) { 7313 if (!netdev) {
7305 err = -ENOMEM; 7314 err = -ENOMEM;
@@ -7454,13 +7463,17 @@ skip_sriov:
7454 7463
7455#ifdef IXGBE_FCOE 7464#ifdef IXGBE_FCOE
7456 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { 7465 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
7466 unsigned int fcoe_l;
7467
7457 if (hw->mac.ops.get_device_caps) { 7468 if (hw->mac.ops.get_device_caps) {
7458 hw->mac.ops.get_device_caps(hw, &device_caps); 7469 hw->mac.ops.get_device_caps(hw, &device_caps);
7459 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) 7470 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
7460 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; 7471 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
7461 } 7472 }
7462 7473
7463 adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE; 7474
7475 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
7476 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
7464 7477
7465 netdev->features |= NETIF_F_FSO | 7478 netdev->features |= NETIF_F_FSO |
7466 NETIF_F_FCOE_CRC; 7479 NETIF_F_FCOE_CRC;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index eb534a071fde..060d2ad2ac96 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -852,11 +852,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
852 852
853 status = hw->phy.ops.read_i2c_eeprom(hw, 853 status = hw->phy.ops.read_i2c_eeprom(hw,
854 IXGBE_SFF_IDENTIFIER, 854 IXGBE_SFF_IDENTIFIER,
855 &identifier); 855 &identifier);
856 856
857 if (status == IXGBE_ERR_SWFW_SYNC || 857 if (status != 0)
858 status == IXGBE_ERR_I2C ||
859 status == IXGBE_ERR_SFP_NOT_PRESENT)
860 goto err_read_i2c_eeprom; 858 goto err_read_i2c_eeprom;
861 859
862 /* LAN ID is needed for sfp_type determination */ 860 /* LAN ID is needed for sfp_type determination */
@@ -870,26 +868,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
870 IXGBE_SFF_1GBE_COMP_CODES, 868 IXGBE_SFF_1GBE_COMP_CODES,
871 &comp_codes_1g); 869 &comp_codes_1g);
872 870
873 if (status == IXGBE_ERR_SWFW_SYNC || 871 if (status != 0)
874 status == IXGBE_ERR_I2C ||
875 status == IXGBE_ERR_SFP_NOT_PRESENT)
876 goto err_read_i2c_eeprom; 872 goto err_read_i2c_eeprom;
877 873
878 status = hw->phy.ops.read_i2c_eeprom(hw, 874 status = hw->phy.ops.read_i2c_eeprom(hw,
879 IXGBE_SFF_10GBE_COMP_CODES, 875 IXGBE_SFF_10GBE_COMP_CODES,
880 &comp_codes_10g); 876 &comp_codes_10g);
881 877
882 if (status == IXGBE_ERR_SWFW_SYNC || 878 if (status != 0)
883 status == IXGBE_ERR_I2C ||
884 status == IXGBE_ERR_SFP_NOT_PRESENT)
885 goto err_read_i2c_eeprom; 879 goto err_read_i2c_eeprom;
886 status = hw->phy.ops.read_i2c_eeprom(hw, 880 status = hw->phy.ops.read_i2c_eeprom(hw,
887 IXGBE_SFF_CABLE_TECHNOLOGY, 881 IXGBE_SFF_CABLE_TECHNOLOGY,
888 &cable_tech); 882 &cable_tech);
889 883
890 if (status == IXGBE_ERR_SWFW_SYNC || 884 if (status != 0)
891 status == IXGBE_ERR_I2C ||
892 status == IXGBE_ERR_SFP_NOT_PRESENT)
893 goto err_read_i2c_eeprom; 885 goto err_read_i2c_eeprom;
894 886
895 /* ID Module 887 /* ID Module
@@ -984,30 +976,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
984 if (hw->phy.type != ixgbe_phy_nl) { 976 if (hw->phy.type != ixgbe_phy_nl) {
985 hw->phy.id = identifier; 977 hw->phy.id = identifier;
986 status = hw->phy.ops.read_i2c_eeprom(hw, 978 status = hw->phy.ops.read_i2c_eeprom(hw,
987 IXGBE_SFF_VENDOR_OUI_BYTE0, 979 IXGBE_SFF_VENDOR_OUI_BYTE0,
988 &oui_bytes[0]); 980 &oui_bytes[0]);
989 981
990 if (status == IXGBE_ERR_SWFW_SYNC || 982 if (status != 0)
991 status == IXGBE_ERR_I2C ||
992 status == IXGBE_ERR_SFP_NOT_PRESENT)
993 goto err_read_i2c_eeprom; 983 goto err_read_i2c_eeprom;
994 984
995 status = hw->phy.ops.read_i2c_eeprom(hw, 985 status = hw->phy.ops.read_i2c_eeprom(hw,
996 IXGBE_SFF_VENDOR_OUI_BYTE1, 986 IXGBE_SFF_VENDOR_OUI_BYTE1,
997 &oui_bytes[1]); 987 &oui_bytes[1]);
998 988
999 if (status == IXGBE_ERR_SWFW_SYNC || 989 if (status != 0)
1000 status == IXGBE_ERR_I2C ||
1001 status == IXGBE_ERR_SFP_NOT_PRESENT)
1002 goto err_read_i2c_eeprom; 990 goto err_read_i2c_eeprom;
1003 991
1004 status = hw->phy.ops.read_i2c_eeprom(hw, 992 status = hw->phy.ops.read_i2c_eeprom(hw,
1005 IXGBE_SFF_VENDOR_OUI_BYTE2, 993 IXGBE_SFF_VENDOR_OUI_BYTE2,
1006 &oui_bytes[2]); 994 &oui_bytes[2]);
1007 995
1008 if (status == IXGBE_ERR_SWFW_SYNC || 996 if (status != 0)
1009 status == IXGBE_ERR_I2C ||
1010 status == IXGBE_ERR_SFP_NOT_PRESENT)
1011 goto err_read_i2c_eeprom; 997 goto err_read_i2c_eeprom;
1012 998
1013 vendor_oui = 999 vendor_oui =
@@ -1307,9 +1293,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1307 break; 1293 break;
1308 1294
1309fail: 1295fail:
1296 ixgbe_i2c_bus_clear(hw);
1310 hw->mac.ops.release_swfw_sync(hw, swfw_mask); 1297 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1311 msleep(100); 1298 msleep(100);
1312 ixgbe_i2c_bus_clear(hw);
1313 retry++; 1299 retry++;
1314 if (retry < max_retry) 1300 if (retry < max_retry)
1315 hw_dbg(hw, "I2C byte read error - Retrying.\n"); 1301 hw_dbg(hw, "I2C byte read error - Retrying.\n");