diff options
author | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2013-02-09 07:49:21 -0500 |
---|---|---|
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2013-02-16 00:46:37 -0500 |
commit | 6cfbd97b3e891ed5a70b43b7a237341f4c09cbf1 (patch) | |
tree | 4e2188cb90fd4360d1850144b115d7b0f1d83a56 /drivers/net/ethernet/intel | |
parent | efd9450e7e36717f24dff3bd584faa80a85231d6 (diff) |
e1000: fix whitespace issues and multi-line comments
Fixes whitespace issues, such as lines exceeding 80 chars, needless blank
lines and the use of spaces where tabs are needed. In addition, fix
multi-line comments to align with the networking standard.
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r-- | drivers/net/ethernet/intel/e1000/e1000.h | 65 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 140 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/e1000/e1000_hw.c | 558 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/e1000/e1000_main.c | 322 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/e1000/e1000_param.c | 29 |
5 files changed, 586 insertions, 528 deletions
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 2b6cd02bfba0..26d9cd59ec75 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h | |||
@@ -81,68 +81,69 @@ struct e1000_adapter; | |||
81 | 81 | ||
82 | #include "e1000_hw.h" | 82 | #include "e1000_hw.h" |
83 | 83 | ||
84 | #define E1000_MAX_INTR 10 | 84 | #define E1000_MAX_INTR 10 |
85 | 85 | ||
86 | /* TX/RX descriptor defines */ | 86 | /* TX/RX descriptor defines */ |
87 | #define E1000_DEFAULT_TXD 256 | 87 | #define E1000_DEFAULT_TXD 256 |
88 | #define E1000_MAX_TXD 256 | 88 | #define E1000_MAX_TXD 256 |
89 | #define E1000_MIN_TXD 48 | 89 | #define E1000_MIN_TXD 48 |
90 | #define E1000_MAX_82544_TXD 4096 | 90 | #define E1000_MAX_82544_TXD 4096 |
91 | 91 | ||
92 | #define E1000_DEFAULT_RXD 256 | 92 | #define E1000_DEFAULT_RXD 256 |
93 | #define E1000_MAX_RXD 256 | 93 | #define E1000_MAX_RXD 256 |
94 | #define E1000_MIN_RXD 48 | 94 | #define E1000_MIN_RXD 48 |
95 | #define E1000_MAX_82544_RXD 4096 | 95 | #define E1000_MAX_82544_RXD 4096 |
96 | 96 | ||
97 | #define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ | 97 | #define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ |
98 | #define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ | 98 | #define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ |
99 | 99 | ||
100 | /* this is the size past which hardware will drop packets when setting LPE=0 */ | 100 | /* this is the size past which hardware will drop packets when setting LPE=0 */ |
101 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 | 101 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 |
102 | 102 | ||
103 | /* Supported Rx Buffer Sizes */ | 103 | /* Supported Rx Buffer Sizes */ |
104 | #define E1000_RXBUFFER_128 128 /* Used for packet split */ | 104 | #define E1000_RXBUFFER_128 128 /* Used for packet split */ |
105 | #define E1000_RXBUFFER_256 256 /* Used for packet split */ | 105 | #define E1000_RXBUFFER_256 256 /* Used for packet split */ |
106 | #define E1000_RXBUFFER_512 512 | 106 | #define E1000_RXBUFFER_512 512 |
107 | #define E1000_RXBUFFER_1024 1024 | 107 | #define E1000_RXBUFFER_1024 1024 |
108 | #define E1000_RXBUFFER_2048 2048 | 108 | #define E1000_RXBUFFER_2048 2048 |
109 | #define E1000_RXBUFFER_4096 4096 | 109 | #define E1000_RXBUFFER_4096 4096 |
110 | #define E1000_RXBUFFER_8192 8192 | 110 | #define E1000_RXBUFFER_8192 8192 |
111 | #define E1000_RXBUFFER_16384 16384 | 111 | #define E1000_RXBUFFER_16384 16384 |
112 | 112 | ||
113 | /* SmartSpeed delimiters */ | 113 | /* SmartSpeed delimiters */ |
114 | #define E1000_SMARTSPEED_DOWNSHIFT 3 | 114 | #define E1000_SMARTSPEED_DOWNSHIFT 3 |
115 | #define E1000_SMARTSPEED_MAX 15 | 115 | #define E1000_SMARTSPEED_MAX 15 |
116 | 116 | ||
117 | /* Packet Buffer allocations */ | 117 | /* Packet Buffer allocations */ |
118 | #define E1000_PBA_BYTES_SHIFT 0xA | 118 | #define E1000_PBA_BYTES_SHIFT 0xA |
119 | #define E1000_TX_HEAD_ADDR_SHIFT 7 | 119 | #define E1000_TX_HEAD_ADDR_SHIFT 7 |
120 | #define E1000_PBA_TX_MASK 0xFFFF0000 | 120 | #define E1000_PBA_TX_MASK 0xFFFF0000 |
121 | 121 | ||
122 | /* Flow Control Watermarks */ | 122 | /* Flow Control Watermarks */ |
123 | #define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ | 123 | #define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ |
124 | #define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ | 124 | #define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ |
125 | 125 | ||
126 | #define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ | 126 | #define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ |
127 | 127 | ||
128 | /* How many Tx Descriptors do we need to call netif_wake_queue ? */ | 128 | /* How many Tx Descriptors do we need to call netif_wake_queue ? */ |
129 | #define E1000_TX_QUEUE_WAKE 16 | 129 | #define E1000_TX_QUEUE_WAKE 16 |
130 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ | 130 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ |
131 | #define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ | 131 | #define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ |
132 | 132 | ||
133 | #define AUTO_ALL_MODES 0 | 133 | #define AUTO_ALL_MODES 0 |
134 | #define E1000_EEPROM_82544_APM 0x0004 | 134 | #define E1000_EEPROM_82544_APM 0x0004 |
135 | #define E1000_EEPROM_APME 0x0400 | 135 | #define E1000_EEPROM_APME 0x0400 |
136 | 136 | ||
137 | #ifndef E1000_MASTER_SLAVE | 137 | #ifndef E1000_MASTER_SLAVE |
138 | /* Switch to override PHY master/slave setting */ | 138 | /* Switch to override PHY master/slave setting */ |
139 | #define E1000_MASTER_SLAVE e1000_ms_hw_default | 139 | #define E1000_MASTER_SLAVE e1000_ms_hw_default |
140 | #endif | 140 | #endif |
141 | 141 | ||
142 | #define E1000_MNG_VLAN_NONE (-1) | 142 | #define E1000_MNG_VLAN_NONE (-1) |
143 | 143 | ||
144 | /* wrapper around a pointer to a socket buffer, | 144 | /* wrapper around a pointer to a socket buffer, |
145 | * so a DMA handle can be stored along with the buffer */ | 145 | * so a DMA handle can be stored along with the buffer |
146 | */ | ||
146 | struct e1000_buffer { | 147 | struct e1000_buffer { |
147 | struct sk_buff *skb; | 148 | struct sk_buff *skb; |
148 | dma_addr_t dma; | 149 | dma_addr_t dma; |
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 14e30515f6aa..43462d596a4e 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c | |||
@@ -115,12 +115,12 @@ static int e1000_get_settings(struct net_device *netdev, | |||
115 | if (hw->media_type == e1000_media_type_copper) { | 115 | if (hw->media_type == e1000_media_type_copper) { |
116 | 116 | ||
117 | ecmd->supported = (SUPPORTED_10baseT_Half | | 117 | ecmd->supported = (SUPPORTED_10baseT_Half | |
118 | SUPPORTED_10baseT_Full | | 118 | SUPPORTED_10baseT_Full | |
119 | SUPPORTED_100baseT_Half | | 119 | SUPPORTED_100baseT_Half | |
120 | SUPPORTED_100baseT_Full | | 120 | SUPPORTED_100baseT_Full | |
121 | SUPPORTED_1000baseT_Full| | 121 | SUPPORTED_1000baseT_Full| |
122 | SUPPORTED_Autoneg | | 122 | SUPPORTED_Autoneg | |
123 | SUPPORTED_TP); | 123 | SUPPORTED_TP); |
124 | ecmd->advertising = ADVERTISED_TP; | 124 | ecmd->advertising = ADVERTISED_TP; |
125 | 125 | ||
126 | if (hw->autoneg == 1) { | 126 | if (hw->autoneg == 1) { |
@@ -161,8 +161,8 @@ static int e1000_get_settings(struct net_device *netdev, | |||
161 | ethtool_cmd_speed_set(ecmd, adapter->link_speed); | 161 | ethtool_cmd_speed_set(ecmd, adapter->link_speed); |
162 | 162 | ||
163 | /* unfortunately FULL_DUPLEX != DUPLEX_FULL | 163 | /* unfortunately FULL_DUPLEX != DUPLEX_FULL |
164 | * and HALF_DUPLEX != DUPLEX_HALF */ | 164 | * and HALF_DUPLEX != DUPLEX_HALF |
165 | 165 | */ | |
166 | if (adapter->link_duplex == FULL_DUPLEX) | 166 | if (adapter->link_duplex == FULL_DUPLEX) |
167 | ecmd->duplex = DUPLEX_FULL; | 167 | ecmd->duplex = DUPLEX_FULL; |
168 | else | 168 | else |
@@ -179,8 +179,7 @@ static int e1000_get_settings(struct net_device *netdev, | |||
179 | if ((hw->media_type == e1000_media_type_copper) && | 179 | if ((hw->media_type == e1000_media_type_copper) && |
180 | netif_carrier_ok(netdev)) | 180 | netif_carrier_ok(netdev)) |
181 | ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? | 181 | ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? |
182 | ETH_TP_MDI_X : | 182 | ETH_TP_MDI_X : ETH_TP_MDI); |
183 | ETH_TP_MDI); | ||
184 | else | 183 | else |
185 | ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; | 184 | ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; |
186 | 185 | ||
@@ -197,8 +196,7 @@ static int e1000_set_settings(struct net_device *netdev, | |||
197 | struct e1000_adapter *adapter = netdev_priv(netdev); | 196 | struct e1000_adapter *adapter = netdev_priv(netdev); |
198 | struct e1000_hw *hw = &adapter->hw; | 197 | struct e1000_hw *hw = &adapter->hw; |
199 | 198 | ||
200 | /* | 199 | /* MDI setting is only allowed when autoneg enabled because |
201 | * MDI setting is only allowed when autoneg enabled because | ||
202 | * some hardware doesn't allow MDI setting when speed or | 200 | * some hardware doesn't allow MDI setting when speed or |
203 | * duplex is forced. | 201 | * duplex is forced. |
204 | */ | 202 | */ |
@@ -224,8 +222,8 @@ static int e1000_set_settings(struct net_device *netdev, | |||
224 | ADVERTISED_Autoneg; | 222 | ADVERTISED_Autoneg; |
225 | else | 223 | else |
226 | hw->autoneg_advertised = ecmd->advertising | | 224 | hw->autoneg_advertised = ecmd->advertising | |
227 | ADVERTISED_TP | | 225 | ADVERTISED_TP | |
228 | ADVERTISED_Autoneg; | 226 | ADVERTISED_Autoneg; |
229 | ecmd->advertising = hw->autoneg_advertised; | 227 | ecmd->advertising = hw->autoneg_advertised; |
230 | } else { | 228 | } else { |
231 | u32 speed = ethtool_cmd_speed(ecmd); | 229 | u32 speed = ethtool_cmd_speed(ecmd); |
@@ -260,8 +258,7 @@ static u32 e1000_get_link(struct net_device *netdev) | |||
260 | { | 258 | { |
261 | struct e1000_adapter *adapter = netdev_priv(netdev); | 259 | struct e1000_adapter *adapter = netdev_priv(netdev); |
262 | 260 | ||
263 | /* | 261 | /* If the link is not reported up to netdev, interrupts are disabled, |
264 | * If the link is not reported up to netdev, interrupts are disabled, | ||
265 | * and so the physical link state may have changed since we last | 262 | * and so the physical link state may have changed since we last |
266 | * looked. Set get_link_status to make sure that the true link | 263 | * looked. Set get_link_status to make sure that the true link |
267 | * state is interrogated, rather than pulling a cached and possibly | 264 | * state is interrogated, rather than pulling a cached and possibly |
@@ -484,7 +481,7 @@ static int e1000_get_eeprom(struct net_device *netdev, | |||
484 | le16_to_cpus(&eeprom_buff[i]); | 481 | le16_to_cpus(&eeprom_buff[i]); |
485 | 482 | ||
486 | memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), | 483 | memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), |
487 | eeprom->len); | 484 | eeprom->len); |
488 | kfree(eeprom_buff); | 485 | kfree(eeprom_buff); |
489 | 486 | ||
490 | return ret_val; | 487 | return ret_val; |
@@ -517,15 +514,17 @@ static int e1000_set_eeprom(struct net_device *netdev, | |||
517 | ptr = (void *)eeprom_buff; | 514 | ptr = (void *)eeprom_buff; |
518 | 515 | ||
519 | if (eeprom->offset & 1) { | 516 | if (eeprom->offset & 1) { |
520 | /* need read/modify/write of first changed EEPROM word */ | 517 | /* need read/modify/write of first changed EEPROM word |
521 | /* only the second byte of the word is being modified */ | 518 | * only the second byte of the word is being modified |
519 | */ | ||
522 | ret_val = e1000_read_eeprom(hw, first_word, 1, | 520 | ret_val = e1000_read_eeprom(hw, first_word, 1, |
523 | &eeprom_buff[0]); | 521 | &eeprom_buff[0]); |
524 | ptr++; | 522 | ptr++; |
525 | } | 523 | } |
526 | if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { | 524 | if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { |
527 | /* need read/modify/write of last changed EEPROM word */ | 525 | /* need read/modify/write of last changed EEPROM word |
528 | /* only the first byte of the word is being modified */ | 526 | * only the first byte of the word is being modified |
527 | */ | ||
529 | ret_val = e1000_read_eeprom(hw, last_word, 1, | 528 | ret_val = e1000_read_eeprom(hw, last_word, 1, |
530 | &eeprom_buff[last_word - first_word]); | 529 | &eeprom_buff[last_word - first_word]); |
531 | } | 530 | } |
@@ -606,11 +605,13 @@ static int e1000_set_ringparam(struct net_device *netdev, | |||
606 | rx_old = adapter->rx_ring; | 605 | rx_old = adapter->rx_ring; |
607 | 606 | ||
608 | err = -ENOMEM; | 607 | err = -ENOMEM; |
609 | txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL); | 608 | txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), |
609 | GFP_KERNEL); | ||
610 | if (!txdr) | 610 | if (!txdr) |
611 | goto err_alloc_tx; | 611 | goto err_alloc_tx; |
612 | 612 | ||
613 | rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL); | 613 | rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), |
614 | GFP_KERNEL); | ||
614 | if (!rxdr) | 615 | if (!rxdr) |
615 | goto err_alloc_rx; | 616 | goto err_alloc_rx; |
616 | 617 | ||
@@ -619,12 +620,12 @@ static int e1000_set_ringparam(struct net_device *netdev, | |||
619 | 620 | ||
620 | rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD); | 621 | rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD); |
621 | rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ? | 622 | rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ? |
622 | E1000_MAX_RXD : E1000_MAX_82544_RXD)); | 623 | E1000_MAX_RXD : E1000_MAX_82544_RXD)); |
623 | rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); | 624 | rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); |
624 | 625 | ||
625 | txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD); | 626 | txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD); |
626 | txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ? | 627 | txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ? |
627 | E1000_MAX_TXD : E1000_MAX_82544_TXD)); | 628 | E1000_MAX_TXD : E1000_MAX_82544_TXD)); |
628 | txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); | 629 | txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); |
629 | 630 | ||
630 | for (i = 0; i < adapter->num_tx_queues; i++) | 631 | for (i = 0; i < adapter->num_tx_queues; i++) |
@@ -642,7 +643,8 @@ static int e1000_set_ringparam(struct net_device *netdev, | |||
642 | goto err_setup_tx; | 643 | goto err_setup_tx; |
643 | 644 | ||
644 | /* save the new, restore the old in order to free it, | 645 | /* save the new, restore the old in order to free it, |
645 | * then restore the new back again */ | 646 | * then restore the new back again |
647 | */ | ||
646 | 648 | ||
647 | adapter->rx_ring = rx_old; | 649 | adapter->rx_ring = rx_old; |
648 | adapter->tx_ring = tx_old; | 650 | adapter->tx_ring = tx_old; |
@@ -784,7 +786,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | |||
784 | REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); | 786 | REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); |
785 | 787 | ||
786 | if (hw->mac_type >= e1000_82543) { | 788 | if (hw->mac_type >= e1000_82543) { |
787 | |||
788 | REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); | 789 | REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); |
789 | REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | 790 | REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); |
790 | REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); | 791 | REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); |
@@ -795,14 +796,11 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | |||
795 | REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, | 796 | REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, |
796 | 0xFFFFFFFF); | 797 | 0xFFFFFFFF); |
797 | } | 798 | } |
798 | |||
799 | } else { | 799 | } else { |
800 | |||
801 | REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); | 800 | REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); |
802 | REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); | 801 | REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); |
803 | REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); | 802 | REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); |
804 | REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); | 803 | REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); |
805 | |||
806 | } | 804 | } |
807 | 805 | ||
808 | value = E1000_MC_TBL_SIZE; | 806 | value = E1000_MC_TBL_SIZE; |
@@ -858,13 +856,14 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
858 | 856 | ||
859 | *data = 0; | 857 | *data = 0; |
860 | 858 | ||
861 | /* NOTE: we don't test MSI interrupts here, yet */ | 859 | /* NOTE: we don't test MSI interrupts here, yet |
862 | /* Hook up test interrupt handler just for this test */ | 860 | * Hook up test interrupt handler just for this test |
861 | */ | ||
863 | if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, | 862 | if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, |
864 | netdev)) | 863 | netdev)) |
865 | shared_int = false; | 864 | shared_int = false; |
866 | else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, | 865 | else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, |
867 | netdev->name, netdev)) { | 866 | netdev->name, netdev)) { |
868 | *data = 1; | 867 | *data = 1; |
869 | return -1; | 868 | return -1; |
870 | } | 869 | } |
@@ -1253,14 +1252,15 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1253 | ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ | 1252 | ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ |
1254 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | 1253 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ |
1255 | E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ | 1254 | E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ |
1256 | E1000_CTRL_FD); /* Force Duplex to FULL */ | 1255 | E1000_CTRL_FD); /* Force Duplex to FULL */ |
1257 | 1256 | ||
1258 | if (hw->media_type == e1000_media_type_copper && | 1257 | if (hw->media_type == e1000_media_type_copper && |
1259 | hw->phy_type == e1000_phy_m88) | 1258 | hw->phy_type == e1000_phy_m88) |
1260 | ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ | 1259 | ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ |
1261 | else { | 1260 | else { |
1262 | /* Set the ILOS bit on the fiber Nic if half | 1261 | /* Set the ILOS bit on the fiber Nic if half |
1263 | * duplex link is detected. */ | 1262 | * duplex link is detected. |
1263 | */ | ||
1264 | stat_reg = er32(STATUS); | 1264 | stat_reg = er32(STATUS); |
1265 | if ((stat_reg & E1000_STATUS_FD) == 0) | 1265 | if ((stat_reg & E1000_STATUS_FD) == 0) |
1266 | ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); | 1266 | ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); |
@@ -1446,7 +1446,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) | |||
1446 | 1446 | ||
1447 | ret_val = e1000_check_lbtest_frame( | 1447 | ret_val = e1000_check_lbtest_frame( |
1448 | rxdr->buffer_info[l].skb, | 1448 | rxdr->buffer_info[l].skb, |
1449 | 1024); | 1449 | 1024); |
1450 | if (!ret_val) | 1450 | if (!ret_val) |
1451 | good_cnt++; | 1451 | good_cnt++; |
1452 | if (unlikely(++l == rxdr->count)) l = 0; | 1452 | if (unlikely(++l == rxdr->count)) l = 0; |
@@ -1493,7 +1493,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) | |||
1493 | hw->serdes_has_link = false; | 1493 | hw->serdes_has_link = false; |
1494 | 1494 | ||
1495 | /* On some blade server designs, link establishment | 1495 | /* On some blade server designs, link establishment |
1496 | * could take as long as 2-3 minutes */ | 1496 | * could take as long as 2-3 minutes |
1497 | */ | ||
1497 | do { | 1498 | do { |
1498 | e1000_check_for_link(hw); | 1499 | e1000_check_for_link(hw); |
1499 | if (hw->serdes_has_link) | 1500 | if (hw->serdes_has_link) |
@@ -1545,7 +1546,8 @@ static void e1000_diag_test(struct net_device *netdev, | |||
1545 | e_info(hw, "offline testing starting\n"); | 1546 | e_info(hw, "offline testing starting\n"); |
1546 | 1547 | ||
1547 | /* Link test performed before hardware reset so autoneg doesn't | 1548 | /* Link test performed before hardware reset so autoneg doesn't |
1548 | * interfere with test result */ | 1549 | * interfere with test result |
1550 | */ | ||
1549 | if (e1000_link_test(adapter, &data[4])) | 1551 | if (e1000_link_test(adapter, &data[4])) |
1550 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1552 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1551 | 1553 | ||
@@ -1639,7 +1641,8 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, | |||
1639 | default: | 1641 | default: |
1640 | /* dual port cards only support WoL on port A from now on | 1642 | /* dual port cards only support WoL on port A from now on |
1641 | * unless it was enabled in the eeprom for port B | 1643 | * unless it was enabled in the eeprom for port B |
1642 | * so exclude FUNC_1 ports from having WoL enabled */ | 1644 | * so exclude FUNC_1 ports from having WoL enabled |
1645 | */ | ||
1643 | if (er32(STATUS) & E1000_STATUS_FUNC_1 && | 1646 | if (er32(STATUS) & E1000_STATUS_FUNC_1 && |
1644 | !adapter->eeprom_wol) { | 1647 | !adapter->eeprom_wol) { |
1645 | wol->supported = 0; | 1648 | wol->supported = 0; |
@@ -1663,7 +1666,8 @@ static void e1000_get_wol(struct net_device *netdev, | |||
1663 | wol->wolopts = 0; | 1666 | wol->wolopts = 0; |
1664 | 1667 | ||
1665 | /* this function will set ->supported = 0 and return 1 if wol is not | 1668 | /* this function will set ->supported = 0 and return 1 if wol is not |
1666 | * supported by this hardware */ | 1669 | * supported by this hardware |
1670 | */ | ||
1667 | if (e1000_wol_exclusion(adapter, wol) || | 1671 | if (e1000_wol_exclusion(adapter, wol) || |
1668 | !device_can_wakeup(&adapter->pdev->dev)) | 1672 | !device_can_wakeup(&adapter->pdev->dev)) |
1669 | return; | 1673 | return; |
@@ -1839,7 +1843,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, | |||
1839 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == | 1843 | data[i] = (e1000_gstrings_stats[i].sizeof_stat == |
1840 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; | 1844 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; |
1841 | } | 1845 | } |
1842 | /* BUG_ON(i != E1000_STATS_LEN); */ | 1846 | /* BUG_ON(i != E1000_STATS_LEN); */ |
1843 | } | 1847 | } |
1844 | 1848 | ||
1845 | static void e1000_get_strings(struct net_device *netdev, u32 stringset, | 1849 | static void e1000_get_strings(struct net_device *netdev, u32 stringset, |
@@ -1859,37 +1863,37 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset, | |||
1859 | ETH_GSTRING_LEN); | 1863 | ETH_GSTRING_LEN); |
1860 | p += ETH_GSTRING_LEN; | 1864 | p += ETH_GSTRING_LEN; |
1861 | } | 1865 | } |
1862 | /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ | 1866 | /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ |
1863 | break; | 1867 | break; |
1864 | } | 1868 | } |
1865 | } | 1869 | } |
1866 | 1870 | ||
1867 | static const struct ethtool_ops e1000_ethtool_ops = { | 1871 | static const struct ethtool_ops e1000_ethtool_ops = { |
1868 | .get_settings = e1000_get_settings, | 1872 | .get_settings = e1000_get_settings, |
1869 | .set_settings = e1000_set_settings, | 1873 | .set_settings = e1000_set_settings, |
1870 | .get_drvinfo = e1000_get_drvinfo, | 1874 | .get_drvinfo = e1000_get_drvinfo, |
1871 | .get_regs_len = e1000_get_regs_len, | 1875 | .get_regs_len = e1000_get_regs_len, |
1872 | .get_regs = e1000_get_regs, | 1876 | .get_regs = e1000_get_regs, |
1873 | .get_wol = e1000_get_wol, | 1877 | .get_wol = e1000_get_wol, |
1874 | .set_wol = e1000_set_wol, | 1878 | .set_wol = e1000_set_wol, |
1875 | .get_msglevel = e1000_get_msglevel, | 1879 | .get_msglevel = e1000_get_msglevel, |
1876 | .set_msglevel = e1000_set_msglevel, | 1880 | .set_msglevel = e1000_set_msglevel, |
1877 | .nway_reset = e1000_nway_reset, | 1881 | .nway_reset = e1000_nway_reset, |
1878 | .get_link = e1000_get_link, | 1882 | .get_link = e1000_get_link, |
1879 | .get_eeprom_len = e1000_get_eeprom_len, | 1883 | .get_eeprom_len = e1000_get_eeprom_len, |
1880 | .get_eeprom = e1000_get_eeprom, | 1884 | .get_eeprom = e1000_get_eeprom, |
1881 | .set_eeprom = e1000_set_eeprom, | 1885 | .set_eeprom = e1000_set_eeprom, |
1882 | .get_ringparam = e1000_get_ringparam, | 1886 | .get_ringparam = e1000_get_ringparam, |
1883 | .set_ringparam = e1000_set_ringparam, | 1887 | .set_ringparam = e1000_set_ringparam, |
1884 | .get_pauseparam = e1000_get_pauseparam, | 1888 | .get_pauseparam = e1000_get_pauseparam, |
1885 | .set_pauseparam = e1000_set_pauseparam, | 1889 | .set_pauseparam = e1000_set_pauseparam, |
1886 | .self_test = e1000_diag_test, | 1890 | .self_test = e1000_diag_test, |
1887 | .get_strings = e1000_get_strings, | 1891 | .get_strings = e1000_get_strings, |
1888 | .set_phys_id = e1000_set_phys_id, | 1892 | .set_phys_id = e1000_set_phys_id, |
1889 | .get_ethtool_stats = e1000_get_ethtool_stats, | 1893 | .get_ethtool_stats = e1000_get_ethtool_stats, |
1890 | .get_sset_count = e1000_get_sset_count, | 1894 | .get_sset_count = e1000_get_sset_count, |
1891 | .get_coalesce = e1000_get_coalesce, | 1895 | .get_coalesce = e1000_get_coalesce, |
1892 | .set_coalesce = e1000_set_coalesce, | 1896 | .set_coalesce = e1000_set_coalesce, |
1893 | .get_ts_info = ethtool_op_get_ts_info, | 1897 | .get_ts_info = ethtool_op_get_ts_info, |
1894 | }; | 1898 | }; |
1895 | 1899 | ||
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 8fedd2451538..2879b9631e15 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c | |||
@@ -164,8 +164,9 @@ static void e1000_phy_init_script(struct e1000_hw *hw) | |||
164 | if (hw->phy_init_script) { | 164 | if (hw->phy_init_script) { |
165 | msleep(20); | 165 | msleep(20); |
166 | 166 | ||
167 | /* Save off the current value of register 0x2F5B to be restored at | 167 | /* Save off the current value of register 0x2F5B to be restored |
168 | * the end of this routine. */ | 168 | * at the end of this routine. |
169 | */ | ||
169 | ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); | 170 | ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); |
170 | 171 | ||
171 | /* Disabled the PHY transmitter */ | 172 | /* Disabled the PHY transmitter */ |
@@ -466,7 +467,8 @@ s32 e1000_reset_hw(struct e1000_hw *hw) | |||
466 | case e1000_82541: | 467 | case e1000_82541: |
467 | case e1000_82541_rev_2: | 468 | case e1000_82541_rev_2: |
468 | /* These controllers can't ack the 64-bit write when issuing the | 469 | /* These controllers can't ack the 64-bit write when issuing the |
469 | * reset, so use IO-mapping as a workaround to issue the reset */ | 470 | * reset, so use IO-mapping as a workaround to issue the reset |
471 | */ | ||
470 | E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); | 472 | E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); |
471 | break; | 473 | break; |
472 | case e1000_82545_rev_3: | 474 | case e1000_82545_rev_3: |
@@ -480,9 +482,9 @@ s32 e1000_reset_hw(struct e1000_hw *hw) | |||
480 | break; | 482 | break; |
481 | } | 483 | } |
482 | 484 | ||
483 | /* After MAC reset, force reload of EEPROM to restore power-on settings to | 485 | /* After MAC reset, force reload of EEPROM to restore power-on settings |
484 | * device. Later controllers reload the EEPROM automatically, so just wait | 486 | * to device. Later controllers reload the EEPROM automatically, so |
485 | * for reload to complete. | 487 | * just wait for reload to complete. |
486 | */ | 488 | */ |
487 | switch (hw->mac_type) { | 489 | switch (hw->mac_type) { |
488 | case e1000_82542_rev2_0: | 490 | case e1000_82542_rev2_0: |
@@ -591,8 +593,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
591 | msleep(5); | 593 | msleep(5); |
592 | } | 594 | } |
593 | 595 | ||
594 | /* Setup the receive address. This involves initializing all of the Receive | 596 | /* Setup the receive address. This involves initializing all of the |
595 | * Address Registers (RARs 0 - 15). | 597 | * Receive Address Registers (RARs 0 - 15). |
596 | */ | 598 | */ |
597 | e1000_init_rx_addrs(hw); | 599 | e1000_init_rx_addrs(hw); |
598 | 600 | ||
@@ -611,7 +613,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
611 | for (i = 0; i < mta_size; i++) { | 613 | for (i = 0; i < mta_size; i++) { |
612 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); | 614 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); |
613 | /* use write flush to prevent Memory Write Block (MWB) from | 615 | /* use write flush to prevent Memory Write Block (MWB) from |
614 | * occurring when accessing our register space */ | 616 | * occurring when accessing our register space |
617 | */ | ||
615 | E1000_WRITE_FLUSH(); | 618 | E1000_WRITE_FLUSH(); |
616 | } | 619 | } |
617 | 620 | ||
@@ -630,7 +633,9 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
630 | case e1000_82546_rev_3: | 633 | case e1000_82546_rev_3: |
631 | break; | 634 | break; |
632 | default: | 635 | default: |
633 | /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */ | 636 | /* Workaround for PCI-X problem when BIOS sets MMRBC |
637 | * incorrectly. | ||
638 | */ | ||
634 | if (hw->bus_type == e1000_bus_type_pcix | 639 | if (hw->bus_type == e1000_bus_type_pcix |
635 | && e1000_pcix_get_mmrbc(hw) > 2048) | 640 | && e1000_pcix_get_mmrbc(hw) > 2048) |
636 | e1000_pcix_set_mmrbc(hw, 2048); | 641 | e1000_pcix_set_mmrbc(hw, 2048); |
@@ -660,7 +665,8 @@ s32 e1000_init_hw(struct e1000_hw *hw) | |||
660 | hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { | 665 | hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { |
661 | ctrl_ext = er32(CTRL_EXT); | 666 | ctrl_ext = er32(CTRL_EXT); |
662 | /* Relaxed ordering must be disabled to avoid a parity | 667 | /* Relaxed ordering must be disabled to avoid a parity |
663 | * error crash in a PCI slot. */ | 668 | * error crash in a PCI slot. |
669 | */ | ||
664 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; | 670 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; |
665 | ew32(CTRL_EXT, ctrl_ext); | 671 | ew32(CTRL_EXT, ctrl_ext); |
666 | } | 672 | } |
@@ -810,8 +816,9 @@ s32 e1000_setup_link(struct e1000_hw *hw) | |||
810 | ew32(FCRTL, 0); | 816 | ew32(FCRTL, 0); |
811 | ew32(FCRTH, 0); | 817 | ew32(FCRTH, 0); |
812 | } else { | 818 | } else { |
813 | /* We need to set up the Receive Threshold high and low water marks | 819 | /* We need to set up the Receive Threshold high and low water |
814 | * as well as (optionally) enabling the transmission of XON frames. | 820 | * marks as well as (optionally) enabling the transmission of |
821 | * XON frames. | ||
815 | */ | 822 | */ |
816 | if (hw->fc_send_xon) { | 823 | if (hw->fc_send_xon) { |
817 | ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); | 824 | ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); |
@@ -868,42 +875,46 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
868 | e1000_config_collision_dist(hw); | 875 | e1000_config_collision_dist(hw); |
869 | 876 | ||
870 | /* Check for a software override of the flow control settings, and setup | 877 | /* Check for a software override of the flow control settings, and setup |
871 | * the device accordingly. If auto-negotiation is enabled, then software | 878 | * the device accordingly. If auto-negotiation is enabled, then |
872 | * will have to set the "PAUSE" bits to the correct value in the Tranmsit | 879 | * software will have to set the "PAUSE" bits to the correct value in |
873 | * Config Word Register (TXCW) and re-start auto-negotiation. However, if | 880 | * the Tranmsit Config Word Register (TXCW) and re-start |
874 | * auto-negotiation is disabled, then software will have to manually | 881 | * auto-negotiation. However, if auto-negotiation is disabled, then |
875 | * configure the two flow control enable bits in the CTRL register. | 882 | * software will have to manually configure the two flow control enable |
883 | * bits in the CTRL register. | ||
876 | * | 884 | * |
877 | * The possible values of the "fc" parameter are: | 885 | * The possible values of the "fc" parameter are: |
878 | * 0: Flow control is completely disabled | 886 | * 0: Flow control is completely disabled |
879 | * 1: Rx flow control is enabled (we can receive pause frames, but | 887 | * 1: Rx flow control is enabled (we can receive pause frames, but |
880 | * not send pause frames). | 888 | * not send pause frames). |
881 | * 2: Tx flow control is enabled (we can send pause frames but we do | 889 | * 2: Tx flow control is enabled (we can send pause frames but we do |
882 | * not support receiving pause frames). | 890 | * not support receiving pause frames). |
883 | * 3: Both Rx and TX flow control (symmetric) are enabled. | 891 | * 3: Both Rx and TX flow control (symmetric) are enabled. |
884 | */ | 892 | */ |
885 | switch (hw->fc) { | 893 | switch (hw->fc) { |
886 | case E1000_FC_NONE: | 894 | case E1000_FC_NONE: |
887 | /* Flow control is completely disabled by a software over-ride. */ | 895 | /* Flow ctrl is completely disabled by a software over-ride */ |
888 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); | 896 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); |
889 | break; | 897 | break; |
890 | case E1000_FC_RX_PAUSE: | 898 | case E1000_FC_RX_PAUSE: |
891 | /* RX Flow control is enabled and TX Flow control is disabled by a | 899 | /* Rx Flow control is enabled and Tx Flow control is disabled by |
892 | * software over-ride. Since there really isn't a way to advertise | 900 | * a software over-ride. Since there really isn't a way to |
893 | * that we are capable of RX Pause ONLY, we will advertise that we | 901 | * advertise that we are capable of Rx Pause ONLY, we will |
894 | * support both symmetric and asymmetric RX PAUSE. Later, we will | 902 | * advertise that we support both symmetric and asymmetric Rx |
895 | * disable the adapter's ability to send PAUSE frames. | 903 | * PAUSE. Later, we will disable the adapter's ability to send |
904 | * PAUSE frames. | ||
896 | */ | 905 | */ |
897 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); | 906 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); |
898 | break; | 907 | break; |
899 | case E1000_FC_TX_PAUSE: | 908 | case E1000_FC_TX_PAUSE: |
900 | /* TX Flow control is enabled, and RX Flow control is disabled, by a | 909 | /* Tx Flow control is enabled, and Rx Flow control is disabled, |
901 | * software over-ride. | 910 | * by a software over-ride. |
902 | */ | 911 | */ |
903 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); | 912 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); |
904 | break; | 913 | break; |
905 | case E1000_FC_FULL: | 914 | case E1000_FC_FULL: |
906 | /* Flow control (both RX and TX) is enabled by a software over-ride. */ | 915 | /* Flow control (both Rx and Tx) is enabled by a software |
916 | * over-ride. | ||
917 | */ | ||
907 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); | 918 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); |
908 | break; | 919 | break; |
909 | default: | 920 | default: |
@@ -912,11 +923,11 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
912 | break; | 923 | break; |
913 | } | 924 | } |
914 | 925 | ||
915 | /* Since auto-negotiation is enabled, take the link out of reset (the link | 926 | /* Since auto-negotiation is enabled, take the link out of reset (the |
916 | * will be in reset, because we previously reset the chip). This will | 927 | * link will be in reset, because we previously reset the chip). This |
917 | * restart auto-negotiation. If auto-negotiation is successful then the | 928 | * will restart auto-negotiation. If auto-negotiation is successful |
918 | * link-up status bit will be set and the flow control enable bits (RFCE | 929 | * then the link-up status bit will be set and the flow control enable |
919 | * and TFCE) will be set according to their negotiated value. | 930 | * bits (RFCE and TFCE) will be set according to their negotiated value. |
920 | */ | 931 | */ |
921 | e_dbg("Auto-negotiation enabled\n"); | 932 | e_dbg("Auto-negotiation enabled\n"); |
922 | 933 | ||
@@ -927,11 +938,12 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
927 | hw->txcw = txcw; | 938 | hw->txcw = txcw; |
928 | msleep(1); | 939 | msleep(1); |
929 | 940 | ||
930 | /* If we have a signal (the cable is plugged in) then poll for a "Link-Up" | 941 | /* If we have a signal (the cable is plugged in) then poll for a |
931 | * indication in the Device Status Register. Time-out if a link isn't | 942 | * "Link-Up" indication in the Device Status Register. Time-out if a |
932 | * seen in 500 milliseconds seconds (Auto-negotiation should complete in | 943 | * link isn't seen in 500 milliseconds seconds (Auto-negotiation should |
933 | * less than 500 milliseconds even if the other end is doing it in SW). | 944 | * complete in less than 500 milliseconds even if the other end is doing |
934 | * For internal serdes, we just assume a signal is present, then poll. | 945 | * it in SW). For internal serdes, we just assume a signal is present, |
946 | * then poll. | ||
935 | */ | 947 | */ |
936 | if (hw->media_type == e1000_media_type_internal_serdes || | 948 | if (hw->media_type == e1000_media_type_internal_serdes || |
937 | (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { | 949 | (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { |
@@ -946,9 +958,9 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
946 | e_dbg("Never got a valid link from auto-neg!!!\n"); | 958 | e_dbg("Never got a valid link from auto-neg!!!\n"); |
947 | hw->autoneg_failed = 1; | 959 | hw->autoneg_failed = 1; |
948 | /* AutoNeg failed to achieve a link, so we'll call | 960 | /* AutoNeg failed to achieve a link, so we'll call |
949 | * e1000_check_for_link. This routine will force the link up if | 961 | * e1000_check_for_link. This routine will force the |
950 | * we detect a signal. This will allow us to communicate with | 962 | * link up if we detect a signal. This will allow us to |
951 | * non-autonegotiating link partners. | 963 | * communicate with non-autonegotiating link partners. |
952 | */ | 964 | */ |
953 | ret_val = e1000_check_for_link(hw); | 965 | ret_val = e1000_check_for_link(hw); |
954 | if (ret_val) { | 966 | if (ret_val) { |
@@ -1042,9 +1054,9 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) | |||
1042 | e_dbg("e1000_copper_link_preconfig"); | 1054 | e_dbg("e1000_copper_link_preconfig"); |
1043 | 1055 | ||
1044 | ctrl = er32(CTRL); | 1056 | ctrl = er32(CTRL); |
1045 | /* With 82543, we need to force speed and duplex on the MAC equal to what | 1057 | /* With 82543, we need to force speed and duplex on the MAC equal to |
1046 | * the PHY speed and duplex configuration is. In addition, we need to | 1058 | * what the PHY speed and duplex configuration is. In addition, we need |
1047 | * perform a hardware reset on the PHY to take it out of reset. | 1059 | * to perform a hardware reset on the PHY to take it out of reset. |
1048 | */ | 1060 | */ |
1049 | if (hw->mac_type > e1000_82543) { | 1061 | if (hw->mac_type > e1000_82543) { |
1050 | ctrl |= E1000_CTRL_SLU; | 1062 | ctrl |= E1000_CTRL_SLU; |
@@ -1175,7 +1187,8 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) | |||
1175 | 1187 | ||
1176 | /* when autonegotiation advertisement is only 1000Mbps then we | 1188 | /* when autonegotiation advertisement is only 1000Mbps then we |
1177 | * should disable SmartSpeed and enable Auto MasterSlave | 1189 | * should disable SmartSpeed and enable Auto MasterSlave |
1178 | * resolution as hardware default. */ | 1190 | * resolution as hardware default. |
1191 | */ | ||
1179 | if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { | 1192 | if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { |
1180 | /* Disable SmartSpeed */ | 1193 | /* Disable SmartSpeed */ |
1181 | ret_val = | 1194 | ret_val = |
@@ -1485,13 +1498,15 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw) | |||
1485 | 1498 | ||
1486 | if (hw->autoneg) { | 1499 | if (hw->autoneg) { |
1487 | /* Setup autoneg and flow control advertisement | 1500 | /* Setup autoneg and flow control advertisement |
1488 | * and perform autonegotiation */ | 1501 | * and perform autonegotiation |
1502 | */ | ||
1489 | ret_val = e1000_copper_link_autoneg(hw); | 1503 | ret_val = e1000_copper_link_autoneg(hw); |
1490 | if (ret_val) | 1504 | if (ret_val) |
1491 | return ret_val; | 1505 | return ret_val; |
1492 | } else { | 1506 | } else { |
1493 | /* PHY will be set to 10H, 10F, 100H,or 100F | 1507 | /* PHY will be set to 10H, 10F, 100H,or 100F |
1494 | * depending on value from forced_speed_duplex. */ | 1508 | * depending on value from forced_speed_duplex. |
1509 | */ | ||
1495 | e_dbg("Forcing speed and duplex\n"); | 1510 | e_dbg("Forcing speed and duplex\n"); |
1496 | ret_val = e1000_phy_force_speed_duplex(hw); | 1511 | ret_val = e1000_phy_force_speed_duplex(hw); |
1497 | if (ret_val) { | 1512 | if (ret_val) { |
@@ -1609,7 +1624,8 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1609 | * setup the PHY advertisement registers accordingly. If | 1624 | * setup the PHY advertisement registers accordingly. If |
1610 | * auto-negotiation is enabled, then software will have to set the | 1625 | * auto-negotiation is enabled, then software will have to set the |
1611 | * "PAUSE" bits to the correct value in the Auto-Negotiation | 1626 | * "PAUSE" bits to the correct value in the Auto-Negotiation |
1612 | * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation. | 1627 | * Advertisement Register (PHY_AUTONEG_ADV) and re-start |
1628 | * auto-negotiation. | ||
1613 | * | 1629 | * |
1614 | * The possible values of the "fc" parameter are: | 1630 | * The possible values of the "fc" parameter are: |
1615 | * 0: Flow control is completely disabled | 1631 | * 0: Flow control is completely disabled |
@@ -1636,7 +1652,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1636 | * capable of RX Pause ONLY, we will advertise that we | 1652 | * capable of RX Pause ONLY, we will advertise that we |
1637 | * support both symmetric and asymmetric RX PAUSE. Later | 1653 | * support both symmetric and asymmetric RX PAUSE. Later |
1638 | * (in e1000_config_fc_after_link_up) we will disable the | 1654 | * (in e1000_config_fc_after_link_up) we will disable the |
1639 | *hw's ability to send PAUSE frames. | 1655 | * hw's ability to send PAUSE frames. |
1640 | */ | 1656 | */ |
1641 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | 1657 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); |
1642 | break; | 1658 | break; |
@@ -1720,15 +1736,15 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
1720 | /* Are we forcing Full or Half Duplex? */ | 1736 | /* Are we forcing Full or Half Duplex? */ |
1721 | if (hw->forced_speed_duplex == e1000_100_full || | 1737 | if (hw->forced_speed_duplex == e1000_100_full || |
1722 | hw->forced_speed_duplex == e1000_10_full) { | 1738 | hw->forced_speed_duplex == e1000_10_full) { |
1723 | /* We want to force full duplex so we SET the full duplex bits in the | 1739 | /* We want to force full duplex so we SET the full duplex bits |
1724 | * Device and MII Control Registers. | 1740 | * in the Device and MII Control Registers. |
1725 | */ | 1741 | */ |
1726 | ctrl |= E1000_CTRL_FD; | 1742 | ctrl |= E1000_CTRL_FD; |
1727 | mii_ctrl_reg |= MII_CR_FULL_DUPLEX; | 1743 | mii_ctrl_reg |= MII_CR_FULL_DUPLEX; |
1728 | e_dbg("Full Duplex\n"); | 1744 | e_dbg("Full Duplex\n"); |
1729 | } else { | 1745 | } else { |
1730 | /* We want to force half duplex so we CLEAR the full duplex bits in | 1746 | /* We want to force half duplex so we CLEAR the full duplex bits |
1731 | * the Device and MII Control Registers. | 1747 | * in the Device and MII Control Registers. |
1732 | */ | 1748 | */ |
1733 | ctrl &= ~E1000_CTRL_FD; | 1749 | ctrl &= ~E1000_CTRL_FD; |
1734 | mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; | 1750 | mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; |
@@ -1762,8 +1778,8 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
1762 | if (ret_val) | 1778 | if (ret_val) |
1763 | return ret_val; | 1779 | return ret_val; |
1764 | 1780 | ||
1765 | /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI | 1781 | /* Clear Auto-Crossover to force MDI manually. M88E1000 requires |
1766 | * forced whenever speed are duplex are forced. | 1782 | * MDI forced whenever speed are duplex are forced. |
1767 | */ | 1783 | */ |
1768 | phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; | 1784 | phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; |
1769 | ret_val = | 1785 | ret_val = |
@@ -1814,10 +1830,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
1814 | e_dbg("Waiting for forced speed/duplex link.\n"); | 1830 | e_dbg("Waiting for forced speed/duplex link.\n"); |
1815 | mii_status_reg = 0; | 1831 | mii_status_reg = 0; |
1816 | 1832 | ||
1817 | /* We will wait for autoneg to complete or 4.5 seconds to expire. */ | 1833 | /* Wait for autoneg to complete or 4.5 seconds to expire */ |
1818 | for (i = PHY_FORCE_TIME; i > 0; i--) { | 1834 | for (i = PHY_FORCE_TIME; i > 0; i--) { |
1819 | /* Read the MII Status Register and wait for Auto-Neg Complete bit | 1835 | /* Read the MII Status Register and wait for Auto-Neg |
1820 | * to be set. | 1836 | * Complete bit to be set. |
1821 | */ | 1837 | */ |
1822 | ret_val = | 1838 | ret_val = |
1823 | e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); | 1839 | e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); |
@@ -1834,20 +1850,24 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
1834 | msleep(100); | 1850 | msleep(100); |
1835 | } | 1851 | } |
1836 | if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { | 1852 | if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { |
1837 | /* We didn't get link. Reset the DSP and wait again for link. */ | 1853 | /* We didn't get link. Reset the DSP and wait again |
1854 | * for link. | ||
1855 | */ | ||
1838 | ret_val = e1000_phy_reset_dsp(hw); | 1856 | ret_val = e1000_phy_reset_dsp(hw); |
1839 | if (ret_val) { | 1857 | if (ret_val) { |
1840 | e_dbg("Error Resetting PHY DSP\n"); | 1858 | e_dbg("Error Resetting PHY DSP\n"); |
1841 | return ret_val; | 1859 | return ret_val; |
1842 | } | 1860 | } |
1843 | } | 1861 | } |
1844 | /* This loop will early-out if the link condition has been met. */ | 1862 | /* This loop will early-out if the link condition has been |
1863 | * met | ||
1864 | */ | ||
1845 | for (i = PHY_FORCE_TIME; i > 0; i--) { | 1865 | for (i = PHY_FORCE_TIME; i > 0; i--) { |
1846 | if (mii_status_reg & MII_SR_LINK_STATUS) | 1866 | if (mii_status_reg & MII_SR_LINK_STATUS) |
1847 | break; | 1867 | break; |
1848 | msleep(100); | 1868 | msleep(100); |
1849 | /* Read the MII Status Register and wait for Auto-Neg Complete bit | 1869 | /* Read the MII Status Register and wait for Auto-Neg |
1850 | * to be set. | 1870 | * Complete bit to be set. |
1851 | */ | 1871 | */ |
1852 | ret_val = | 1872 | ret_val = |
1853 | e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); | 1873 | e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); |
@@ -1862,9 +1882,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
1862 | } | 1882 | } |
1863 | 1883 | ||
1864 | if (hw->phy_type == e1000_phy_m88) { | 1884 | if (hw->phy_type == e1000_phy_m88) { |
1865 | /* Because we reset the PHY above, we need to re-force TX_CLK in the | 1885 | /* Because we reset the PHY above, we need to re-force TX_CLK in |
1866 | * Extended PHY Specific Control Register to 25MHz clock. This value | 1886 | * the Extended PHY Specific Control Register to 25MHz clock. |
1867 | * defaults back to a 2.5MHz clock when the PHY is reset. | 1887 | * This value defaults back to a 2.5MHz clock when the PHY is |
1888 | * reset. | ||
1868 | */ | 1889 | */ |
1869 | ret_val = | 1890 | ret_val = |
1870 | e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, | 1891 | e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, |
@@ -1879,8 +1900,9 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
1879 | if (ret_val) | 1900 | if (ret_val) |
1880 | return ret_val; | 1901 | return ret_val; |
1881 | 1902 | ||
1882 | /* In addition, because of the s/w reset above, we need to enable CRS on | 1903 | /* In addition, because of the s/w reset above, we need to |
1883 | * TX. This must be set for both full and half duplex operation. | 1904 | * enable CRS on Tx. This must be set for both full and half |
1905 | * duplex operation. | ||
1884 | */ | 1906 | */ |
1885 | ret_val = | 1907 | ret_val = |
1886 | e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | 1908 | e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); |
@@ -1951,7 +1973,8 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) | |||
1951 | e_dbg("e1000_config_mac_to_phy"); | 1973 | e_dbg("e1000_config_mac_to_phy"); |
1952 | 1974 | ||
1953 | /* 82544 or newer MAC, Auto Speed Detection takes care of | 1975 | /* 82544 or newer MAC, Auto Speed Detection takes care of |
1954 | * MAC speed/duplex configuration.*/ | 1976 | * MAC speed/duplex configuration. |
1977 | */ | ||
1955 | if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) | 1978 | if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) |
1956 | return E1000_SUCCESS; | 1979 | return E1000_SUCCESS; |
1957 | 1980 | ||
@@ -1985,7 +2008,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) | |||
1985 | * registers depending on negotiated values. | 2008 | * registers depending on negotiated values. |
1986 | */ | 2009 | */ |
1987 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, | 2010 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, |
1988 | &phy_data); | 2011 | &phy_data); |
1989 | if (ret_val) | 2012 | if (ret_val) |
1990 | return ret_val; | 2013 | return ret_val; |
1991 | 2014 | ||
@@ -2002,7 +2025,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) | |||
2002 | if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) | 2025 | if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) |
2003 | ctrl |= E1000_CTRL_SPD_1000; | 2026 | ctrl |= E1000_CTRL_SPD_1000; |
2004 | else if ((phy_data & M88E1000_PSSR_SPEED) == | 2027 | else if ((phy_data & M88E1000_PSSR_SPEED) == |
2005 | M88E1000_PSSR_100MBS) | 2028 | M88E1000_PSSR_100MBS) |
2006 | ctrl |= E1000_CTRL_SPD_100; | 2029 | ctrl |= E1000_CTRL_SPD_100; |
2007 | } | 2030 | } |
2008 | 2031 | ||
@@ -2135,9 +2158,9 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) | |||
2135 | if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { | 2158 | if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { |
2136 | /* The AutoNeg process has completed, so we now need to | 2159 | /* The AutoNeg process has completed, so we now need to |
2137 | * read both the Auto Negotiation Advertisement Register | 2160 | * read both the Auto Negotiation Advertisement Register |
2138 | * (Address 4) and the Auto_Negotiation Base Page Ability | 2161 | * (Address 4) and the Auto_Negotiation Base Page |
2139 | * Register (Address 5) to determine how flow control was | 2162 | * Ability Register (Address 5) to determine how flow |
2140 | * negotiated. | 2163 | * control was negotiated. |
2141 | */ | 2164 | */ |
2142 | ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, | 2165 | ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, |
2143 | &mii_nway_adv_reg); | 2166 | &mii_nway_adv_reg); |
@@ -2148,18 +2171,19 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) | |||
2148 | if (ret_val) | 2171 | if (ret_val) |
2149 | return ret_val; | 2172 | return ret_val; |
2150 | 2173 | ||
2151 | /* Two bits in the Auto Negotiation Advertisement Register | 2174 | /* Two bits in the Auto Negotiation Advertisement |
2152 | * (Address 4) and two bits in the Auto Negotiation Base | 2175 | * Register (Address 4) and two bits in the Auto |
2153 | * Page Ability Register (Address 5) determine flow control | 2176 | * Negotiation Base Page Ability Register (Address 5) |
2154 | * for both the PHY and the link partner. The following | 2177 | * determine flow control for both the PHY and the link |
2155 | * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, | 2178 | * partner. The following table, taken out of the IEEE |
2156 | * 1999, describes these PAUSE resolution bits and how flow | 2179 | * 802.3ab/D6.0 dated March 25, 1999, describes these |
2157 | * control is determined based upon these settings. | 2180 | * PAUSE resolution bits and how flow control is |
2181 | * determined based upon these settings. | ||
2158 | * NOTE: DC = Don't Care | 2182 | * NOTE: DC = Don't Care |
2159 | * | 2183 | * |
2160 | * LOCAL DEVICE | LINK PARTNER | 2184 | * LOCAL DEVICE | LINK PARTNER |
2161 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution | 2185 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution |
2162 | *-------|---------|-------|---------|-------------------- | 2186 | *-------|---------|-------|---------|------------------ |
2163 | * 0 | 0 | DC | DC | E1000_FC_NONE | 2187 | * 0 | 0 | DC | DC | E1000_FC_NONE |
2164 | * 0 | 1 | 0 | DC | E1000_FC_NONE | 2188 | * 0 | 1 | 0 | DC | E1000_FC_NONE |
2165 | * 0 | 1 | 1 | 0 | E1000_FC_NONE | 2189 | * 0 | 1 | 1 | 0 | E1000_FC_NONE |
@@ -2178,17 +2202,18 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) | |||
2178 | * | 2202 | * |
2179 | * LOCAL DEVICE | LINK PARTNER | 2203 | * LOCAL DEVICE | LINK PARTNER |
2180 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | 2204 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result |
2181 | *-------|---------|-------|---------|-------------------- | 2205 | *-------|---------|-------|---------|------------------ |
2182 | * 1 | DC | 1 | DC | E1000_FC_FULL | 2206 | * 1 | DC | 1 | DC | E1000_FC_FULL |
2183 | * | 2207 | * |
2184 | */ | 2208 | */ |
2185 | if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && | 2209 | if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && |
2186 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { | 2210 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { |
2187 | /* Now we need to check if the user selected RX ONLY | 2211 | /* Now we need to check if the user selected Rx |
2188 | * of pause frames. In this case, we had to advertise | 2212 | * ONLY of pause frames. In this case, we had |
2189 | * FULL flow control because we could not advertise RX | 2213 | * to advertise FULL flow control because we |
2190 | * ONLY. Hence, we must now check to see if we need to | 2214 | * could not advertise Rx ONLY. Hence, we must |
2191 | * turn OFF the TRANSMISSION of PAUSE frames. | 2215 | * now check to see if we need to turn OFF the |
2216 | * TRANSMISSION of PAUSE frames. | ||
2192 | */ | 2217 | */ |
2193 | if (hw->original_fc == E1000_FC_FULL) { | 2218 | if (hw->original_fc == E1000_FC_FULL) { |
2194 | hw->fc = E1000_FC_FULL; | 2219 | hw->fc = E1000_FC_FULL; |
@@ -2203,7 +2228,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) | |||
2203 | * | 2228 | * |
2204 | * LOCAL DEVICE | LINK PARTNER | 2229 | * LOCAL DEVICE | LINK PARTNER |
2205 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | 2230 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result |
2206 | *-------|---------|-------|---------|-------------------- | 2231 | *-------|---------|-------|---------|------------------ |
2207 | * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE | 2232 | * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE |
2208 | * | 2233 | * |
2209 | */ | 2234 | */ |
@@ -2220,7 +2245,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) | |||
2220 | * | 2245 | * |
2221 | * LOCAL DEVICE | LINK PARTNER | 2246 | * LOCAL DEVICE | LINK PARTNER |
2222 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | 2247 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result |
2223 | *-------|---------|-------|---------|-------------------- | 2248 | *-------|---------|-------|---------|------------------ |
2224 | * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE | 2249 | * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE |
2225 | * | 2250 | * |
2226 | */ | 2251 | */ |
@@ -2233,25 +2258,27 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) | |||
2233 | e_dbg | 2258 | e_dbg |
2234 | ("Flow Control = RX PAUSE frames only.\n"); | 2259 | ("Flow Control = RX PAUSE frames only.\n"); |
2235 | } | 2260 | } |
2236 | /* Per the IEEE spec, at this point flow control should be | 2261 | /* Per the IEEE spec, at this point flow control should |
2237 | * disabled. However, we want to consider that we could | 2262 | * be disabled. However, we want to consider that we |
2238 | * be connected to a legacy switch that doesn't advertise | 2263 | * could be connected to a legacy switch that doesn't |
2239 | * desired flow control, but can be forced on the link | 2264 | * advertise desired flow control, but can be forced on |
2240 | * partner. So if we advertised no flow control, that is | 2265 | * the link partner. So if we advertised no flow |
2241 | * what we will resolve to. If we advertised some kind of | 2266 | * control, that is what we will resolve to. If we |
2242 | * receive capability (Rx Pause Only or Full Flow Control) | 2267 | * advertised some kind of receive capability (Rx Pause |
2243 | * and the link partner advertised none, we will configure | 2268 | * Only or Full Flow Control) and the link partner |
2244 | * ourselves to enable Rx Flow Control only. We can do | 2269 | * advertised none, we will configure ourselves to |
2245 | * this safely for two reasons: If the link partner really | 2270 | * enable Rx Flow Control only. We can do this safely |
2246 | * didn't want flow control enabled, and we enable Rx, no | 2271 | * for two reasons: If the link partner really |
2247 | * harm done since we won't be receiving any PAUSE frames | 2272 | * didn't want flow control enabled, and we enable Rx, |
2248 | * anyway. If the intent on the link partner was to have | 2273 | * no harm done since we won't be receiving any PAUSE |
2249 | * flow control enabled, then by us enabling RX only, we | 2274 | * frames anyway. If the intent on the link partner was |
2250 | * can at least receive pause frames and process them. | 2275 | * to have flow control enabled, then by us enabling Rx |
2251 | * This is a good idea because in most cases, since we are | 2276 | * only, we can at least receive pause frames and |
2252 | * predominantly a server NIC, more times than not we will | 2277 | * process them. This is a good idea because in most |
2253 | * be asked to delay transmission of packets than asking | 2278 | * cases, since we are predominantly a server NIC, more |
2254 | * our link partner to pause transmission of frames. | 2279 | * times than not we will be asked to delay transmission |
2280 | * of packets than asking our link partner to pause | ||
2281 | * transmission of frames. | ||
2255 | */ | 2282 | */ |
2256 | else if ((hw->original_fc == E1000_FC_NONE || | 2283 | else if ((hw->original_fc == E1000_FC_NONE || |
2257 | hw->original_fc == E1000_FC_TX_PAUSE) || | 2284 | hw->original_fc == E1000_FC_TX_PAUSE) || |
@@ -2316,8 +2343,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) | |||
2316 | status = er32(STATUS); | 2343 | status = er32(STATUS); |
2317 | rxcw = er32(RXCW); | 2344 | rxcw = er32(RXCW); |
2318 | 2345 | ||
2319 | /* | 2346 | /* If we don't have link (auto-negotiation failed or link partner |
2320 | * If we don't have link (auto-negotiation failed or link partner | ||
2321 | * cannot auto-negotiate), and our link partner is not trying to | 2347 | * cannot auto-negotiate), and our link partner is not trying to |
2322 | * auto-negotiate with us (we are receiving idles or data), | 2348 | * auto-negotiate with us (we are receiving idles or data), |
2323 | * we need to force link up. We also need to give auto-negotiation | 2349 | * we need to force link up. We also need to give auto-negotiation |
@@ -2346,8 +2372,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) | |||
2346 | goto out; | 2372 | goto out; |
2347 | } | 2373 | } |
2348 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 2374 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
2349 | /* | 2375 | /* If we are forcing link and we are receiving /C/ ordered |
2350 | * If we are forcing link and we are receiving /C/ ordered | ||
2351 | * sets, re-enable auto-negotiation in the TXCW register | 2376 | * sets, re-enable auto-negotiation in the TXCW register |
2352 | * and disable forced link in the Device Control register | 2377 | * and disable forced link in the Device Control register |
2353 | * in an attempt to auto-negotiate with our link partner. | 2378 | * in an attempt to auto-negotiate with our link partner. |
@@ -2358,8 +2383,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) | |||
2358 | 2383 | ||
2359 | hw->serdes_has_link = true; | 2384 | hw->serdes_has_link = true; |
2360 | } else if (!(E1000_TXCW_ANE & er32(TXCW))) { | 2385 | } else if (!(E1000_TXCW_ANE & er32(TXCW))) { |
2361 | /* | 2386 | /* If we force link for non-auto-negotiation switch, check |
2362 | * If we force link for non-auto-negotiation switch, check | ||
2363 | * link status based on MAC synchronization for internal | 2387 | * link status based on MAC synchronization for internal |
2364 | * serdes media type. | 2388 | * serdes media type. |
2365 | */ | 2389 | */ |
@@ -2468,15 +2492,17 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
2468 | 2492 | ||
2469 | if (phy_data & MII_SR_LINK_STATUS) { | 2493 | if (phy_data & MII_SR_LINK_STATUS) { |
2470 | hw->get_link_status = false; | 2494 | hw->get_link_status = false; |
2471 | /* Check if there was DownShift, must be checked immediately after | 2495 | /* Check if there was DownShift, must be checked |
2472 | * link-up */ | 2496 | * immediately after link-up |
2497 | */ | ||
2473 | e1000_check_downshift(hw); | 2498 | e1000_check_downshift(hw); |
2474 | 2499 | ||
2475 | /* If we are on 82544 or 82543 silicon and speed/duplex | 2500 | /* If we are on 82544 or 82543 silicon and speed/duplex |
2476 | * are forced to 10H or 10F, then we will implement the polarity | 2501 | * are forced to 10H or 10F, then we will implement the |
2477 | * reversal workaround. We disable interrupts first, and upon | 2502 | * polarity reversal workaround. We disable interrupts |
2478 | * returning, place the devices interrupt state to its previous | 2503 | * first, and upon returning, place the devices |
2479 | * value except for the link status change interrupt which will | 2504 | * interrupt state to its previous value except for the |
2505 | * link status change interrupt which will | ||
2480 | * happen due to the execution of this workaround. | 2506 | * happen due to the execution of this workaround. |
2481 | */ | 2507 | */ |
2482 | 2508 | ||
@@ -2527,9 +2553,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
2527 | } | 2553 | } |
2528 | } | 2554 | } |
2529 | 2555 | ||
2530 | /* Configure Flow Control now that Auto-Neg has completed. First, we | 2556 | /* Configure Flow Control now that Auto-Neg has completed. |
2531 | * need to restore the desired flow control settings because we may | 2557 | * First, we need to restore the desired flow control settings |
2532 | * have had to re-autoneg with a different link partner. | 2558 | * because we may have had to re-autoneg with a different link |
2559 | * partner. | ||
2533 | */ | 2560 | */ |
2534 | ret_val = e1000_config_fc_after_link_up(hw); | 2561 | ret_val = e1000_config_fc_after_link_up(hw); |
2535 | if (ret_val) { | 2562 | if (ret_val) { |
@@ -2538,11 +2565,12 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
2538 | } | 2565 | } |
2539 | 2566 | ||
2540 | /* At this point we know that we are on copper and we have | 2567 | /* At this point we know that we are on copper and we have |
2541 | * auto-negotiated link. These are conditions for checking the link | 2568 | * auto-negotiated link. These are conditions for checking the |
2542 | * partner capability register. We use the link speed to determine if | 2569 | * link partner capability register. We use the link speed to |
2543 | * TBI compatibility needs to be turned on or off. If the link is not | 2570 | * determine if TBI compatibility needs to be turned on or off. |
2544 | * at gigabit speed, then TBI compatibility is not needed. If we are | 2571 | * If the link is not at gigabit speed, then TBI compatibility |
2545 | * at gigabit speed, we turn on TBI compatibility. | 2572 | * is not needed. If we are at gigabit speed, we turn on TBI |
2573 | * compatibility. | ||
2546 | */ | 2574 | */ |
2547 | if (hw->tbi_compatibility_en) { | 2575 | if (hw->tbi_compatibility_en) { |
2548 | u16 speed, duplex; | 2576 | u16 speed, duplex; |
@@ -2554,20 +2582,23 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
2554 | return ret_val; | 2582 | return ret_val; |
2555 | } | 2583 | } |
2556 | if (speed != SPEED_1000) { | 2584 | if (speed != SPEED_1000) { |
2557 | /* If link speed is not set to gigabit speed, we do not need | 2585 | /* If link speed is not set to gigabit speed, we |
2558 | * to enable TBI compatibility. | 2586 | * do not need to enable TBI compatibility. |
2559 | */ | 2587 | */ |
2560 | if (hw->tbi_compatibility_on) { | 2588 | if (hw->tbi_compatibility_on) { |
2561 | /* If we previously were in the mode, turn it off. */ | 2589 | /* If we previously were in the mode, |
2590 | * turn it off. | ||
2591 | */ | ||
2562 | rctl = er32(RCTL); | 2592 | rctl = er32(RCTL); |
2563 | rctl &= ~E1000_RCTL_SBP; | 2593 | rctl &= ~E1000_RCTL_SBP; |
2564 | ew32(RCTL, rctl); | 2594 | ew32(RCTL, rctl); |
2565 | hw->tbi_compatibility_on = false; | 2595 | hw->tbi_compatibility_on = false; |
2566 | } | 2596 | } |
2567 | } else { | 2597 | } else { |
2568 | /* If TBI compatibility is was previously off, turn it on. For | 2598 | /* If TBI compatibility is was previously off, |
2569 | * compatibility with a TBI link partner, we will store bad | 2599 | * turn it on. For compatibility with a TBI link |
2570 | * packets. Some frames have an additional byte on the end and | 2600 | * partner, we will store bad packets. Some |
2601 | * frames have an additional byte on the end and | ||
2571 | * will look like CRC errors to to the hardware. | 2602 | * will look like CRC errors to to the hardware. |
2572 | */ | 2603 | */ |
2573 | if (!hw->tbi_compatibility_on) { | 2604 | if (!hw->tbi_compatibility_on) { |
@@ -2629,9 +2660,9 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) | |||
2629 | *duplex = FULL_DUPLEX; | 2660 | *duplex = FULL_DUPLEX; |
2630 | } | 2661 | } |
2631 | 2662 | ||
2632 | /* IGP01 PHY may advertise full duplex operation after speed downgrade even | 2663 | /* IGP01 PHY may advertise full duplex operation after speed downgrade |
2633 | * if it is operating at half duplex. Here we set the duplex settings to | 2664 | * even if it is operating at half duplex. Here we set the duplex |
2634 | * match the duplex in the link partner's capabilities. | 2665 | * settings to match the duplex in the link partner's capabilities. |
2635 | */ | 2666 | */ |
2636 | if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { | 2667 | if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { |
2637 | ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); | 2668 | ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); |
@@ -2697,8 +2728,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw) | |||
2697 | */ | 2728 | */ |
2698 | static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) | 2729 | static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) |
2699 | { | 2730 | { |
2700 | /* Raise the clock input to the Management Data Clock (by setting the MDC | 2731 | /* Raise the clock input to the Management Data Clock (by setting the |
2701 | * bit), and then delay 10 microseconds. | 2732 | * MDC bit), and then delay 10 microseconds. |
2702 | */ | 2733 | */ |
2703 | ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); | 2734 | ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); |
2704 | E1000_WRITE_FLUSH(); | 2735 | E1000_WRITE_FLUSH(); |
@@ -2712,8 +2743,8 @@ static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) | |||
2712 | */ | 2743 | */ |
2713 | static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) | 2744 | static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) |
2714 | { | 2745 | { |
2715 | /* Lower the clock input to the Management Data Clock (by clearing the MDC | 2746 | /* Lower the clock input to the Management Data Clock (by clearing the |
2716 | * bit), and then delay 10 microseconds. | 2747 | * MDC bit), and then delay 10 microseconds. |
2717 | */ | 2748 | */ |
2718 | ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); | 2749 | ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); |
2719 | E1000_WRITE_FLUSH(); | 2750 | E1000_WRITE_FLUSH(); |
@@ -2746,10 +2777,10 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) | |||
2746 | ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); | 2777 | ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); |
2747 | 2778 | ||
2748 | while (mask) { | 2779 | while (mask) { |
2749 | /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and | 2780 | /* A "1" is shifted out to the PHY by setting the MDIO bit to |
2750 | * then raising and lowering the Management Data Clock. A "0" is | 2781 | * "1" and then raising and lowering the Management Data Clock. |
2751 | * shifted out to the PHY by setting the MDIO bit to "0" and then | 2782 | * A "0" is shifted out to the PHY by setting the MDIO bit to |
2752 | * raising and lowering the clock. | 2783 | * "0" and then raising and lowering the clock. |
2753 | */ | 2784 | */ |
2754 | if (data & mask) | 2785 | if (data & mask) |
2755 | ctrl |= E1000_CTRL_MDIO; | 2786 | ctrl |= E1000_CTRL_MDIO; |
@@ -2781,24 +2812,26 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) | |||
2781 | u8 i; | 2812 | u8 i; |
2782 | 2813 | ||
2783 | /* In order to read a register from the PHY, we need to shift in a total | 2814 | /* In order to read a register from the PHY, we need to shift in a total |
2784 | * of 18 bits from the PHY. The first two bit (turnaround) times are used | 2815 | * of 18 bits from the PHY. The first two bit (turnaround) times are |
2785 | * to avoid contention on the MDIO pin when a read operation is performed. | 2816 | * used to avoid contention on the MDIO pin when a read operation is |
2786 | * These two bits are ignored by us and thrown away. Bits are "shifted in" | 2817 | * performed. These two bits are ignored by us and thrown away. Bits are |
2787 | * by raising the input to the Management Data Clock (setting the MDC bit), | 2818 | * "shifted in" by raising the input to the Management Data Clock |
2788 | * and then reading the value of the MDIO bit. | 2819 | * (setting the MDC bit), and then reading the value of the MDIO bit. |
2789 | */ | 2820 | */ |
2790 | ctrl = er32(CTRL); | 2821 | ctrl = er32(CTRL); |
2791 | 2822 | ||
2792 | /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */ | 2823 | /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as |
2824 | * input. | ||
2825 | */ | ||
2793 | ctrl &= ~E1000_CTRL_MDIO_DIR; | 2826 | ctrl &= ~E1000_CTRL_MDIO_DIR; |
2794 | ctrl &= ~E1000_CTRL_MDIO; | 2827 | ctrl &= ~E1000_CTRL_MDIO; |
2795 | 2828 | ||
2796 | ew32(CTRL, ctrl); | 2829 | ew32(CTRL, ctrl); |
2797 | E1000_WRITE_FLUSH(); | 2830 | E1000_WRITE_FLUSH(); |
2798 | 2831 | ||
2799 | /* Raise and Lower the clock before reading in the data. This accounts for | 2832 | /* Raise and Lower the clock before reading in the data. This accounts |
2800 | * the turnaround bits. The first clock occurred when we clocked out the | 2833 | * for the turnaround bits. The first clock occurred when we clocked out |
2801 | * last bit of the Register Address. | 2834 | * the last bit of the Register Address. |
2802 | */ | 2835 | */ |
2803 | e1000_raise_mdi_clk(hw, &ctrl); | 2836 | e1000_raise_mdi_clk(hw, &ctrl); |
2804 | e1000_lower_mdi_clk(hw, &ctrl); | 2837 | e1000_lower_mdi_clk(hw, &ctrl); |
@@ -2870,8 +2903,8 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
2870 | 2903 | ||
2871 | if (hw->mac_type > e1000_82543) { | 2904 | if (hw->mac_type > e1000_82543) { |
2872 | /* Set up Op-code, Phy Address, and register address in the MDI | 2905 | /* Set up Op-code, Phy Address, and register address in the MDI |
2873 | * Control register. The MAC will take care of interfacing with the | 2906 | * Control register. The MAC will take care of interfacing with |
2874 | * PHY to retrieve the desired data. | 2907 | * the PHY to retrieve the desired data. |
2875 | */ | 2908 | */ |
2876 | if (hw->mac_type == e1000_ce4100) { | 2909 | if (hw->mac_type == e1000_ce4100) { |
2877 | mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | | 2910 | mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | |
@@ -2929,31 +2962,32 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
2929 | *phy_data = (u16) mdic; | 2962 | *phy_data = (u16) mdic; |
2930 | } | 2963 | } |
2931 | } else { | 2964 | } else { |
2932 | /* We must first send a preamble through the MDIO pin to signal the | 2965 | /* We must first send a preamble through the MDIO pin to signal |
2933 | * beginning of an MII instruction. This is done by sending 32 | 2966 | * the beginning of an MII instruction. This is done by sending |
2934 | * consecutive "1" bits. | 2967 | * 32 consecutive "1" bits. |
2935 | */ | 2968 | */ |
2936 | e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); | 2969 | e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); |
2937 | 2970 | ||
2938 | /* Now combine the next few fields that are required for a read | 2971 | /* Now combine the next few fields that are required for a read |
2939 | * operation. We use this method instead of calling the | 2972 | * operation. We use this method instead of calling the |
2940 | * e1000_shift_out_mdi_bits routine five different times. The format of | 2973 | * e1000_shift_out_mdi_bits routine five different times. The |
2941 | * a MII read instruction consists of a shift out of 14 bits and is | 2974 | * format of a MII read instruction consists of a shift out of |
2942 | * defined as follows: | 2975 | * 14 bits and is defined as follows: |
2943 | * <Preamble><SOF><Op Code><Phy Addr><Reg Addr> | 2976 | * <Preamble><SOF><Op Code><Phy Addr><Reg Addr> |
2944 | * followed by a shift in of 18 bits. This first two bits shifted in | 2977 | * followed by a shift in of 18 bits. This first two bits |
2945 | * are TurnAround bits used to avoid contention on the MDIO pin when a | 2978 | * shifted in are TurnAround bits used to avoid contention on |
2946 | * READ operation is performed. These two bits are thrown away | 2979 | * the MDIO pin when a READ operation is performed. These two |
2947 | * followed by a shift in of 16 bits which contains the desired data. | 2980 | * bits are thrown away followed by a shift in of 16 bits which |
2981 | * contains the desired data. | ||
2948 | */ | 2982 | */ |
2949 | mdic = ((reg_addr) | (phy_addr << 5) | | 2983 | mdic = ((reg_addr) | (phy_addr << 5) | |
2950 | (PHY_OP_READ << 10) | (PHY_SOF << 12)); | 2984 | (PHY_OP_READ << 10) | (PHY_SOF << 12)); |
2951 | 2985 | ||
2952 | e1000_shift_out_mdi_bits(hw, mdic, 14); | 2986 | e1000_shift_out_mdi_bits(hw, mdic, 14); |
2953 | 2987 | ||
2954 | /* Now that we've shifted out the read command to the MII, we need to | 2988 | /* Now that we've shifted out the read command to the MII, we |
2955 | * "shift in" the 16-bit value (18 total bits) of the requested PHY | 2989 | * need to "shift in" the 16-bit value (18 total bits) of the |
2956 | * register address. | 2990 | * requested PHY register address. |
2957 | */ | 2991 | */ |
2958 | *phy_data = e1000_shift_in_mdi_bits(hw); | 2992 | *phy_data = e1000_shift_in_mdi_bits(hw); |
2959 | } | 2993 | } |
@@ -3060,18 +3094,18 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
3060 | } | 3094 | } |
3061 | } | 3095 | } |
3062 | } else { | 3096 | } else { |
3063 | /* We'll need to use the SW defined pins to shift the write command | 3097 | /* We'll need to use the SW defined pins to shift the write |
3064 | * out to the PHY. We first send a preamble to the PHY to signal the | 3098 | * command out to the PHY. We first send a preamble to the PHY |
3065 | * beginning of the MII instruction. This is done by sending 32 | 3099 | * to signal the beginning of the MII instruction. This is done |
3066 | * consecutive "1" bits. | 3100 | * by sending 32 consecutive "1" bits. |
3067 | */ | 3101 | */ |
3068 | e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); | 3102 | e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); |
3069 | 3103 | ||
3070 | /* Now combine the remaining required fields that will indicate a | 3104 | /* Now combine the remaining required fields that will indicate |
3071 | * write operation. We use this method instead of calling the | 3105 | * a write operation. We use this method instead of calling the |
3072 | * e1000_shift_out_mdi_bits routine for each field in the command. The | 3106 | * e1000_shift_out_mdi_bits routine for each field in the |
3073 | * format of a MII write instruction is as follows: | 3107 | * command. The format of a MII write instruction is as follows: |
3074 | * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>. | 3108 | * <Preamble><SOF><OpCode><PhyAddr><RegAddr><Turnaround><Data>. |
3075 | */ | 3109 | */ |
3076 | mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | | 3110 | mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | |
3077 | (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); | 3111 | (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); |
@@ -3100,10 +3134,10 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3100 | e_dbg("Resetting Phy...\n"); | 3134 | e_dbg("Resetting Phy...\n"); |
3101 | 3135 | ||
3102 | if (hw->mac_type > e1000_82543) { | 3136 | if (hw->mac_type > e1000_82543) { |
3103 | /* Read the device control register and assert the E1000_CTRL_PHY_RST | 3137 | /* Read the device control register and assert the |
3104 | * bit. Then, take it out of reset. | 3138 | * E1000_CTRL_PHY_RST bit. Then, take it out of reset. |
3105 | * For e1000 hardware, we delay for 10ms between the assert | 3139 | * For e1000 hardware, we delay for 10ms between the assert |
3106 | * and deassert. | 3140 | * and de-assert. |
3107 | */ | 3141 | */ |
3108 | ctrl = er32(CTRL); | 3142 | ctrl = er32(CTRL); |
3109 | ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); | 3143 | ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); |
@@ -3115,8 +3149,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3115 | E1000_WRITE_FLUSH(); | 3149 | E1000_WRITE_FLUSH(); |
3116 | 3150 | ||
3117 | } else { | 3151 | } else { |
3118 | /* Read the Extended Device Control Register, assert the PHY_RESET_DIR | 3152 | /* Read the Extended Device Control Register, assert the |
3119 | * bit to put the PHY into reset. Then, take it out of reset. | 3153 | * PHY_RESET_DIR bit to put the PHY into reset. Then, take it |
3154 | * out of reset. | ||
3120 | */ | 3155 | */ |
3121 | ctrl_ext = er32(CTRL_EXT); | 3156 | ctrl_ext = er32(CTRL_EXT); |
3122 | ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; | 3157 | ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; |
@@ -3301,7 +3336,8 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, | |||
3301 | e_dbg("e1000_phy_igp_get_info"); | 3336 | e_dbg("e1000_phy_igp_get_info"); |
3302 | 3337 | ||
3303 | /* The downshift status is checked only once, after link is established, | 3338 | /* The downshift status is checked only once, after link is established, |
3304 | * and it stored in the hw->speed_downgraded parameter. */ | 3339 | * and it stored in the hw->speed_downgraded parameter. |
3340 | */ | ||
3305 | phy_info->downshift = (e1000_downshift) hw->speed_downgraded; | 3341 | phy_info->downshift = (e1000_downshift) hw->speed_downgraded; |
3306 | 3342 | ||
3307 | /* IGP01E1000 does not need to support it. */ | 3343 | /* IGP01E1000 does not need to support it. */ |
@@ -3327,7 +3363,9 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, | |||
3327 | 3363 | ||
3328 | if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == | 3364 | if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == |
3329 | IGP01E1000_PSSR_SPEED_1000MBPS) { | 3365 | IGP01E1000_PSSR_SPEED_1000MBPS) { |
3330 | /* Local/Remote Receiver Information are only valid at 1000 Mbps */ | 3366 | /* Local/Remote Receiver Information are only valid @ 1000 |
3367 | * Mbps | ||
3368 | */ | ||
3331 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); | 3369 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); |
3332 | if (ret_val) | 3370 | if (ret_val) |
3333 | return ret_val; | 3371 | return ret_val; |
@@ -3379,7 +3417,8 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, | |||
3379 | e_dbg("e1000_phy_m88_get_info"); | 3417 | e_dbg("e1000_phy_m88_get_info"); |
3380 | 3418 | ||
3381 | /* The downshift status is checked only once, after link is established, | 3419 | /* The downshift status is checked only once, after link is established, |
3382 | * and it stored in the hw->speed_downgraded parameter. */ | 3420 | * and it stored in the hw->speed_downgraded parameter. |
3421 | */ | ||
3383 | phy_info->downshift = (e1000_downshift) hw->speed_downgraded; | 3422 | phy_info->downshift = (e1000_downshift) hw->speed_downgraded; |
3384 | 3423 | ||
3385 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | 3424 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); |
@@ -3574,8 +3613,8 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw) | |||
3574 | } | 3613 | } |
3575 | 3614 | ||
3576 | if (eeprom->type == e1000_eeprom_spi) { | 3615 | if (eeprom->type == e1000_eeprom_spi) { |
3577 | /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to | 3616 | /* eeprom_size will be an enum [0..8] that maps to eeprom sizes |
3578 | * 32KB (incremented by powers of 2). | 3617 | * 128B to 32KB (incremented by powers of 2). |
3579 | */ | 3618 | */ |
3580 | /* Set to default value for initial eeprom read. */ | 3619 | /* Set to default value for initial eeprom read. */ |
3581 | eeprom->word_size = 64; | 3620 | eeprom->word_size = 64; |
@@ -3585,8 +3624,9 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw) | |||
3585 | eeprom_size = | 3624 | eeprom_size = |
3586 | (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; | 3625 | (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; |
3587 | /* 256B eeprom size was not supported in earlier hardware, so we | 3626 | /* 256B eeprom size was not supported in earlier hardware, so we |
3588 | * bump eeprom_size up one to ensure that "1" (which maps to 256B) | 3627 | * bump eeprom_size up one to ensure that "1" (which maps to |
3589 | * is never the result used in the shifting logic below. */ | 3628 | * 256B) is never the result used in the shifting logic below. |
3629 | */ | ||
3590 | if (eeprom_size) | 3630 | if (eeprom_size) |
3591 | eeprom_size++; | 3631 | eeprom_size++; |
3592 | 3632 | ||
@@ -3618,8 +3658,8 @@ static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) | |||
3618 | */ | 3658 | */ |
3619 | static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) | 3659 | static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) |
3620 | { | 3660 | { |
3621 | /* Lower the clock input to the EEPROM (by clearing the SK bit), and then | 3661 | /* Lower the clock input to the EEPROM (by clearing the SK bit), and |
3622 | * wait 50 microseconds. | 3662 | * then wait 50 microseconds. |
3623 | */ | 3663 | */ |
3624 | *eecd = *eecd & ~E1000_EECD_SK; | 3664 | *eecd = *eecd & ~E1000_EECD_SK; |
3625 | ew32(EECD, *eecd); | 3665 | ew32(EECD, *eecd); |
@@ -3651,10 +3691,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) | |||
3651 | eecd |= E1000_EECD_DO; | 3691 | eecd |= E1000_EECD_DO; |
3652 | } | 3692 | } |
3653 | do { | 3693 | do { |
3654 | /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", | 3694 | /* A "1" is shifted out to the EEPROM by setting bit "DI" to a |
3655 | * and then raising and then lowering the clock (the SK bit controls | 3695 | * "1", and then raising and then lowering the clock (the SK bit |
3656 | * the clock input to the EEPROM). A "0" is shifted out to the EEPROM | 3696 | * controls the clock input to the EEPROM). A "0" is shifted |
3657 | * by setting "DI" to "0" and then raising and then lowering the clock. | 3697 | * out to the EEPROM by setting "DI" to "0" and then raising and |
3698 | * then lowering the clock. | ||
3658 | */ | 3699 | */ |
3659 | eecd &= ~E1000_EECD_DI; | 3700 | eecd &= ~E1000_EECD_DI; |
3660 | 3701 | ||
@@ -3691,9 +3732,9 @@ static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) | |||
3691 | 3732 | ||
3692 | /* In order to read a register from the EEPROM, we need to shift 'count' | 3733 | /* In order to read a register from the EEPROM, we need to shift 'count' |
3693 | * bits in from the EEPROM. Bits are "shifted in" by raising the clock | 3734 | * bits in from the EEPROM. Bits are "shifted in" by raising the clock |
3694 | * input to the EEPROM (setting the SK bit), and then reading the value of | 3735 | * input to the EEPROM (setting the SK bit), and then reading the value |
3695 | * the "DO" bit. During this "shifting in" process the "DI" bit should | 3736 | * of the "DO" bit. During this "shifting in" process the "DI" bit |
3696 | * always be clear. | 3737 | * should always be clear. |
3697 | */ | 3738 | */ |
3698 | 3739 | ||
3699 | eecd = er32(EECD); | 3740 | eecd = er32(EECD); |
@@ -3945,8 +3986,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, | |||
3945 | if (eeprom->word_size == 0) | 3986 | if (eeprom->word_size == 0) |
3946 | e1000_init_eeprom_params(hw); | 3987 | e1000_init_eeprom_params(hw); |
3947 | 3988 | ||
3948 | /* A check for invalid values: offset too large, too many words, and not | 3989 | /* A check for invalid values: offset too large, too many words, and |
3949 | * enough words. | 3990 | * not enough words. |
3950 | */ | 3991 | */ |
3951 | if ((offset >= eeprom->word_size) | 3992 | if ((offset >= eeprom->word_size) |
3952 | || (words > eeprom->word_size - offset) || (words == 0)) { | 3993 | || (words > eeprom->word_size - offset) || (words == 0)) { |
@@ -3964,7 +4005,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, | |||
3964 | return -E1000_ERR_EEPROM; | 4005 | return -E1000_ERR_EEPROM; |
3965 | 4006 | ||
3966 | /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have | 4007 | /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have |
3967 | * acquired the EEPROM at this point, so any returns should release it */ | 4008 | * acquired the EEPROM at this point, so any returns should release it |
4009 | */ | ||
3968 | if (eeprom->type == e1000_eeprom_spi) { | 4010 | if (eeprom->type == e1000_eeprom_spi) { |
3969 | u16 word_in; | 4011 | u16 word_in; |
3970 | u8 read_opcode = EEPROM_READ_OPCODE_SPI; | 4012 | u8 read_opcode = EEPROM_READ_OPCODE_SPI; |
@@ -3976,7 +4018,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, | |||
3976 | 4018 | ||
3977 | e1000_standby_eeprom(hw); | 4019 | e1000_standby_eeprom(hw); |
3978 | 4020 | ||
3979 | /* Some SPI eeproms use the 8th address bit embedded in the opcode */ | 4021 | /* Some SPI eeproms use the 8th address bit embedded in the |
4022 | * opcode | ||
4023 | */ | ||
3980 | if ((eeprom->address_bits == 8) && (offset >= 128)) | 4024 | if ((eeprom->address_bits == 8) && (offset >= 128)) |
3981 | read_opcode |= EEPROM_A8_OPCODE_SPI; | 4025 | read_opcode |= EEPROM_A8_OPCODE_SPI; |
3982 | 4026 | ||
@@ -3985,11 +4029,13 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, | |||
3985 | e1000_shift_out_ee_bits(hw, (u16) (offset * 2), | 4029 | e1000_shift_out_ee_bits(hw, (u16) (offset * 2), |
3986 | eeprom->address_bits); | 4030 | eeprom->address_bits); |
3987 | 4031 | ||
3988 | /* Read the data. The address of the eeprom internally increments with | 4032 | /* Read the data. The address of the eeprom internally |
3989 | * each byte (spi) being read, saving on the overhead of eeprom setup | 4033 | * increments with each byte (spi) being read, saving on the |
3990 | * and tear-down. The address counter will roll over if reading beyond | 4034 | * overhead of eeprom setup and tear-down. The address counter |
3991 | * the size of the eeprom, thus allowing the entire memory to be read | 4035 | * will roll over if reading beyond the size of the eeprom, thus |
3992 | * starting from any offset. */ | 4036 | * allowing the entire memory to be read starting from any |
4037 | * offset. | ||
4038 | */ | ||
3993 | for (i = 0; i < words; i++) { | 4039 | for (i = 0; i < words; i++) { |
3994 | word_in = e1000_shift_in_ee_bits(hw, 16); | 4040 | word_in = e1000_shift_in_ee_bits(hw, 16); |
3995 | data[i] = (word_in >> 8) | (word_in << 8); | 4041 | data[i] = (word_in >> 8) | (word_in << 8); |
@@ -4003,8 +4049,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, | |||
4003 | e1000_shift_out_ee_bits(hw, (u16) (offset + i), | 4049 | e1000_shift_out_ee_bits(hw, (u16) (offset + i), |
4004 | eeprom->address_bits); | 4050 | eeprom->address_bits); |
4005 | 4051 | ||
4006 | /* Read the data. For microwire, each word requires the overhead | 4052 | /* Read the data. For microwire, each word requires the |
4007 | * of eeprom setup and tear-down. */ | 4053 | * overhead of eeprom setup and tear-down. |
4054 | */ | ||
4008 | data[i] = e1000_shift_in_ee_bits(hw, 16); | 4055 | data[i] = e1000_shift_in_ee_bits(hw, 16); |
4009 | e1000_standby_eeprom(hw); | 4056 | e1000_standby_eeprom(hw); |
4010 | } | 4057 | } |
@@ -4119,8 +4166,8 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, | |||
4119 | if (eeprom->word_size == 0) | 4166 | if (eeprom->word_size == 0) |
4120 | e1000_init_eeprom_params(hw); | 4167 | e1000_init_eeprom_params(hw); |
4121 | 4168 | ||
4122 | /* A check for invalid values: offset too large, too many words, and not | 4169 | /* A check for invalid values: offset too large, too many words, and |
4123 | * enough words. | 4170 | * not enough words. |
4124 | */ | 4171 | */ |
4125 | if ((offset >= eeprom->word_size) | 4172 | if ((offset >= eeprom->word_size) |
4126 | || (words > eeprom->word_size - offset) || (words == 0)) { | 4173 | || (words > eeprom->word_size - offset) || (words == 0)) { |
@@ -4174,7 +4221,9 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, | |||
4174 | 4221 | ||
4175 | e1000_standby_eeprom(hw); | 4222 | e1000_standby_eeprom(hw); |
4176 | 4223 | ||
4177 | /* Some SPI eeproms use the 8th address bit embedded in the opcode */ | 4224 | /* Some SPI eeproms use the 8th address bit embedded in the |
4225 | * opcode | ||
4226 | */ | ||
4178 | if ((eeprom->address_bits == 8) && (offset >= 128)) | 4227 | if ((eeprom->address_bits == 8) && (offset >= 128)) |
4179 | write_opcode |= EEPROM_A8_OPCODE_SPI; | 4228 | write_opcode |= EEPROM_A8_OPCODE_SPI; |
4180 | 4229 | ||
@@ -4186,16 +4235,19 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, | |||
4186 | 4235 | ||
4187 | /* Send the data */ | 4236 | /* Send the data */ |
4188 | 4237 | ||
4189 | /* Loop to allow for up to whole page write (32 bytes) of eeprom */ | 4238 | /* Loop to allow for up to whole page write (32 bytes) of |
4239 | * eeprom | ||
4240 | */ | ||
4190 | while (widx < words) { | 4241 | while (widx < words) { |
4191 | u16 word_out = data[widx]; | 4242 | u16 word_out = data[widx]; |
4192 | word_out = (word_out >> 8) | (word_out << 8); | 4243 | word_out = (word_out >> 8) | (word_out << 8); |
4193 | e1000_shift_out_ee_bits(hw, word_out, 16); | 4244 | e1000_shift_out_ee_bits(hw, word_out, 16); |
4194 | widx++; | 4245 | widx++; |
4195 | 4246 | ||
4196 | /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE | 4247 | /* Some larger eeprom sizes are capable of a 32-byte |
4197 | * operation, while the smaller eeproms are capable of an 8-byte | 4248 | * PAGE WRITE operation, while the smaller eeproms are |
4198 | * PAGE WRITE operation. Break the inner loop to pass new address | 4249 | * capable of an 8-byte PAGE WRITE operation. Break the |
4250 | * inner loop to pass new address | ||
4199 | */ | 4251 | */ |
4200 | if ((((offset + widx) * 2) % eeprom->page_size) == 0) { | 4252 | if ((((offset + widx) * 2) % eeprom->page_size) == 0) { |
4201 | e1000_standby_eeprom(hw); | 4253 | e1000_standby_eeprom(hw); |
@@ -4249,14 +4301,15 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, | |||
4249 | /* Send the data */ | 4301 | /* Send the data */ |
4250 | e1000_shift_out_ee_bits(hw, data[words_written], 16); | 4302 | e1000_shift_out_ee_bits(hw, data[words_written], 16); |
4251 | 4303 | ||
4252 | /* Toggle the CS line. This in effect tells the EEPROM to execute | 4304 | /* Toggle the CS line. This in effect tells the EEPROM to |
4253 | * the previous command. | 4305 | * execute the previous command. |
4254 | */ | 4306 | */ |
4255 | e1000_standby_eeprom(hw); | 4307 | e1000_standby_eeprom(hw); |
4256 | 4308 | ||
4257 | /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will | 4309 | /* Read DO repeatedly until it is high (equal to '1'). The |
4258 | * signal that the command has been completed by raising the DO signal. | 4310 | * EEPROM will signal that the command has been completed by |
4259 | * If DO does not go high in 10 milliseconds, then error out. | 4311 | * raising the DO signal. If DO does not go high in 10 |
4312 | * milliseconds, then error out. | ||
4260 | */ | 4313 | */ |
4261 | for (i = 0; i < 200; i++) { | 4314 | for (i = 0; i < 200; i++) { |
4262 | eecd = er32(EECD); | 4315 | eecd = er32(EECD); |
@@ -4483,7 +4536,8 @@ static void e1000_clear_vfta(struct e1000_hw *hw) | |||
4483 | for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { | 4536 | for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { |
4484 | /* If the offset we want to clear is the same offset of the | 4537 | /* If the offset we want to clear is the same offset of the |
4485 | * manageability VLAN ID, then clear all bits except that of the | 4538 | * manageability VLAN ID, then clear all bits except that of the |
4486 | * manageability unit */ | 4539 | * manageability unit |
4540 | */ | ||
4487 | vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; | 4541 | vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; |
4488 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); | 4542 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); |
4489 | E1000_WRITE_FLUSH(); | 4543 | E1000_WRITE_FLUSH(); |
@@ -4911,12 +4965,12 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, | |||
4911 | * counters overcount this packet as a CRC error and undercount | 4965 | * counters overcount this packet as a CRC error and undercount |
4912 | * the packet as a good packet | 4966 | * the packet as a good packet |
4913 | */ | 4967 | */ |
4914 | /* This packet should not be counted as a CRC error. */ | 4968 | /* This packet should not be counted as a CRC error. */ |
4915 | stats->crcerrs--; | 4969 | stats->crcerrs--; |
4916 | /* This packet does count as a Good Packet Received. */ | 4970 | /* This packet does count as a Good Packet Received. */ |
4917 | stats->gprc++; | 4971 | stats->gprc++; |
4918 | 4972 | ||
4919 | /* Adjust the Good Octets received counters */ | 4973 | /* Adjust the Good Octets received counters */ |
4920 | carry_bit = 0x80000000 & stats->gorcl; | 4974 | carry_bit = 0x80000000 & stats->gorcl; |
4921 | stats->gorcl += frame_len; | 4975 | stats->gorcl += frame_len; |
4922 | /* If the high bit of Gorcl (the low 32 bits of the Good Octets | 4976 | /* If the high bit of Gorcl (the low 32 bits of the Good Octets |
@@ -5196,8 +5250,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw, | |||
5196 | if (ret_val) | 5250 | if (ret_val) |
5197 | return ret_val; | 5251 | return ret_val; |
5198 | 5252 | ||
5199 | /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to | 5253 | /* If speed is 1000 Mbps, must read the |
5200 | * find the polarity status */ | 5254 | * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status |
5255 | */ | ||
5201 | if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == | 5256 | if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == |
5202 | IGP01E1000_PSSR_SPEED_1000MBPS) { | 5257 | IGP01E1000_PSSR_SPEED_1000MBPS) { |
5203 | 5258 | ||
@@ -5213,8 +5268,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw, | |||
5213 | e1000_rev_polarity_reversed : | 5268 | e1000_rev_polarity_reversed : |
5214 | e1000_rev_polarity_normal; | 5269 | e1000_rev_polarity_normal; |
5215 | } else { | 5270 | } else { |
5216 | /* For 10 Mbps, read the polarity bit in the status register. (for | 5271 | /* For 10 Mbps, read the polarity bit in the status |
5217 | * 100 Mbps this bit is always 0) */ | 5272 | * register. (for 100 Mbps this bit is always 0) |
5273 | */ | ||
5218 | *polarity = | 5274 | *polarity = |
5219 | (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? | 5275 | (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? |
5220 | e1000_rev_polarity_reversed : | 5276 | e1000_rev_polarity_reversed : |
@@ -5374,8 +5430,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) | |||
5374 | } | 5430 | } |
5375 | } else { | 5431 | } else { |
5376 | if (hw->dsp_config_state == e1000_dsp_config_activated) { | 5432 | if (hw->dsp_config_state == e1000_dsp_config_activated) { |
5377 | /* Save off the current value of register 0x2F5B to be restored at | 5433 | /* Save off the current value of register 0x2F5B to be |
5378 | * the end of the routines. */ | 5434 | * restored at the end of the routines. |
5435 | */ | ||
5379 | ret_val = | 5436 | ret_val = |
5380 | e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); | 5437 | e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); |
5381 | 5438 | ||
@@ -5391,7 +5448,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) | |||
5391 | msleep(20); | 5448 | msleep(20); |
5392 | 5449 | ||
5393 | ret_val = e1000_write_phy_reg(hw, 0x0000, | 5450 | ret_val = e1000_write_phy_reg(hw, 0x0000, |
5394 | IGP01E1000_IEEE_FORCE_GIGA); | 5451 | IGP01E1000_IEEE_FORCE_GIGA); |
5395 | if (ret_val) | 5452 | if (ret_val) |
5396 | return ret_val; | 5453 | return ret_val; |
5397 | for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { | 5454 | for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { |
@@ -5412,7 +5469,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) | |||
5412 | } | 5469 | } |
5413 | 5470 | ||
5414 | ret_val = e1000_write_phy_reg(hw, 0x0000, | 5471 | ret_val = e1000_write_phy_reg(hw, 0x0000, |
5415 | IGP01E1000_IEEE_RESTART_AUTONEG); | 5472 | IGP01E1000_IEEE_RESTART_AUTONEG); |
5416 | if (ret_val) | 5473 | if (ret_val) |
5417 | return ret_val; | 5474 | return ret_val; |
5418 | 5475 | ||
@@ -5429,8 +5486,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) | |||
5429 | } | 5486 | } |
5430 | 5487 | ||
5431 | if (hw->ffe_config_state == e1000_ffe_config_active) { | 5488 | if (hw->ffe_config_state == e1000_ffe_config_active) { |
5432 | /* Save off the current value of register 0x2F5B to be restored at | 5489 | /* Save off the current value of register 0x2F5B to be |
5433 | * the end of the routines. */ | 5490 | * restored at the end of the routines. |
5491 | */ | ||
5434 | ret_val = | 5492 | ret_val = |
5435 | e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); | 5493 | e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); |
5436 | 5494 | ||
@@ -5446,7 +5504,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) | |||
5446 | msleep(20); | 5504 | msleep(20); |
5447 | 5505 | ||
5448 | ret_val = e1000_write_phy_reg(hw, 0x0000, | 5506 | ret_val = e1000_write_phy_reg(hw, 0x0000, |
5449 | IGP01E1000_IEEE_FORCE_GIGA); | 5507 | IGP01E1000_IEEE_FORCE_GIGA); |
5450 | if (ret_val) | 5508 | if (ret_val) |
5451 | return ret_val; | 5509 | return ret_val; |
5452 | ret_val = | 5510 | ret_val = |
@@ -5456,7 +5514,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) | |||
5456 | return ret_val; | 5514 | return ret_val; |
5457 | 5515 | ||
5458 | ret_val = e1000_write_phy_reg(hw, 0x0000, | 5516 | ret_val = e1000_write_phy_reg(hw, 0x0000, |
5459 | IGP01E1000_IEEE_RESTART_AUTONEG); | 5517 | IGP01E1000_IEEE_RESTART_AUTONEG); |
5460 | if (ret_val) | 5518 | if (ret_val) |
5461 | return ret_val; | 5519 | return ret_val; |
5462 | 5520 | ||
@@ -5542,8 +5600,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) | |||
5542 | return E1000_SUCCESS; | 5600 | return E1000_SUCCESS; |
5543 | 5601 | ||
5544 | /* During driver activity LPLU should not be used or it will attain link | 5602 | /* During driver activity LPLU should not be used or it will attain link |
5545 | * from the lowest speeds starting from 10Mbps. The capability is used for | 5603 | * from the lowest speeds starting from 10Mbps. The capability is used |
5546 | * Dx transitions and states */ | 5604 | * for Dx transitions and states |
5605 | */ | ||
5547 | if (hw->mac_type == e1000_82541_rev_2 | 5606 | if (hw->mac_type == e1000_82541_rev_2 |
5548 | || hw->mac_type == e1000_82547_rev_2) { | 5607 | || hw->mac_type == e1000_82547_rev_2) { |
5549 | ret_val = | 5608 | ret_val = |
@@ -5563,10 +5622,11 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) | |||
5563 | return ret_val; | 5622 | return ret_val; |
5564 | } | 5623 | } |
5565 | 5624 | ||
5566 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during | 5625 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used |
5567 | * Dx states where the power conservation is most important. During | 5626 | * during Dx states where the power conservation is most |
5568 | * driver activity we should enable SmartSpeed, so performance is | 5627 | * important. During driver activity we should enable |
5569 | * maintained. */ | 5628 | * SmartSpeed, so performance is maintained. |
5629 | */ | ||
5570 | if (hw->smart_speed == e1000_smart_speed_on) { | 5630 | if (hw->smart_speed == e1000_smart_speed_on) { |
5571 | ret_val = | 5631 | ret_val = |
5572 | e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, | 5632 | e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, |
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index d947e3aae1e8..8502c625dbef 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c | |||
@@ -239,7 +239,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) | |||
239 | * e1000_init_module is the first routine called when the driver is | 239 | * e1000_init_module is the first routine called when the driver is |
240 | * loaded. All it does is register with the PCI subsystem. | 240 | * loaded. All it does is register with the PCI subsystem. |
241 | **/ | 241 | **/ |
242 | |||
243 | static int __init e1000_init_module(void) | 242 | static int __init e1000_init_module(void) |
244 | { | 243 | { |
245 | int ret; | 244 | int ret; |
@@ -266,7 +265,6 @@ module_init(e1000_init_module); | |||
266 | * e1000_exit_module is called just before the driver is removed | 265 | * e1000_exit_module is called just before the driver is removed |
267 | * from memory. | 266 | * from memory. |
268 | **/ | 267 | **/ |
269 | |||
270 | static void __exit e1000_exit_module(void) | 268 | static void __exit e1000_exit_module(void) |
271 | { | 269 | { |
272 | pci_unregister_driver(&e1000_driver); | 270 | pci_unregister_driver(&e1000_driver); |
@@ -301,7 +299,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter) | |||
301 | * e1000_irq_disable - Mask off interrupt generation on the NIC | 299 | * e1000_irq_disable - Mask off interrupt generation on the NIC |
302 | * @adapter: board private structure | 300 | * @adapter: board private structure |
303 | **/ | 301 | **/ |
304 | |||
305 | static void e1000_irq_disable(struct e1000_adapter *adapter) | 302 | static void e1000_irq_disable(struct e1000_adapter *adapter) |
306 | { | 303 | { |
307 | struct e1000_hw *hw = &adapter->hw; | 304 | struct e1000_hw *hw = &adapter->hw; |
@@ -315,7 +312,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter) | |||
315 | * e1000_irq_enable - Enable default interrupt generation settings | 312 | * e1000_irq_enable - Enable default interrupt generation settings |
316 | * @adapter: board private structure | 313 | * @adapter: board private structure |
317 | **/ | 314 | **/ |
318 | |||
319 | static void e1000_irq_enable(struct e1000_adapter *adapter) | 315 | static void e1000_irq_enable(struct e1000_adapter *adapter) |
320 | { | 316 | { |
321 | struct e1000_hw *hw = &adapter->hw; | 317 | struct e1000_hw *hw = &adapter->hw; |
@@ -398,11 +394,12 @@ static void e1000_configure(struct e1000_adapter *adapter) | |||
398 | e1000_configure_rx(adapter); | 394 | e1000_configure_rx(adapter); |
399 | /* call E1000_DESC_UNUSED which always leaves | 395 | /* call E1000_DESC_UNUSED which always leaves |
400 | * at least 1 descriptor unused to make sure | 396 | * at least 1 descriptor unused to make sure |
401 | * next_to_use != next_to_clean */ | 397 | * next_to_use != next_to_clean |
398 | */ | ||
402 | for (i = 0; i < adapter->num_rx_queues; i++) { | 399 | for (i = 0; i < adapter->num_rx_queues; i++) { |
403 | struct e1000_rx_ring *ring = &adapter->rx_ring[i]; | 400 | struct e1000_rx_ring *ring = &adapter->rx_ring[i]; |
404 | adapter->alloc_rx_buf(adapter, ring, | 401 | adapter->alloc_rx_buf(adapter, ring, |
405 | E1000_DESC_UNUSED(ring)); | 402 | E1000_DESC_UNUSED(ring)); |
406 | } | 403 | } |
407 | } | 404 | } |
408 | 405 | ||
@@ -433,9 +430,7 @@ int e1000_up(struct e1000_adapter *adapter) | |||
433 | * The phy may be powered down to save power and turn off link when the | 430 | * The phy may be powered down to save power and turn off link when the |
434 | * driver is unloaded and wake on lan is not enabled (among others) | 431 | * driver is unloaded and wake on lan is not enabled (among others) |
435 | * *** this routine MUST be followed by a call to e1000_reset *** | 432 | * *** this routine MUST be followed by a call to e1000_reset *** |
436 | * | ||
437 | **/ | 433 | **/ |
438 | |||
439 | void e1000_power_up_phy(struct e1000_adapter *adapter) | 434 | void e1000_power_up_phy(struct e1000_adapter *adapter) |
440 | { | 435 | { |
441 | struct e1000_hw *hw = &adapter->hw; | 436 | struct e1000_hw *hw = &adapter->hw; |
@@ -444,7 +439,8 @@ void e1000_power_up_phy(struct e1000_adapter *adapter) | |||
444 | /* Just clear the power down bit to wake the phy back up */ | 439 | /* Just clear the power down bit to wake the phy back up */ |
445 | if (hw->media_type == e1000_media_type_copper) { | 440 | if (hw->media_type == e1000_media_type_copper) { |
446 | /* according to the manual, the phy will retain its | 441 | /* according to the manual, the phy will retain its |
447 | * settings across a power-down/up cycle */ | 442 | * settings across a power-down/up cycle |
443 | */ | ||
448 | e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); | 444 | e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); |
449 | mii_reg &= ~MII_CR_POWER_DOWN; | 445 | mii_reg &= ~MII_CR_POWER_DOWN; |
450 | e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); | 446 | e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); |
@@ -459,7 +455,8 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) | |||
459 | * The PHY cannot be powered down if any of the following is true * | 455 | * The PHY cannot be powered down if any of the following is true * |
460 | * (a) WoL is enabled | 456 | * (a) WoL is enabled |
461 | * (b) AMT is active | 457 | * (b) AMT is active |
462 | * (c) SoL/IDER session is active */ | 458 | * (c) SoL/IDER session is active |
459 | */ | ||
463 | if (!adapter->wol && hw->mac_type >= e1000_82540 && | 460 | if (!adapter->wol && hw->mac_type >= e1000_82540 && |
464 | hw->media_type == e1000_media_type_copper) { | 461 | hw->media_type == e1000_media_type_copper) { |
465 | u16 mii_reg = 0; | 462 | u16 mii_reg = 0; |
@@ -529,8 +526,7 @@ void e1000_down(struct e1000_adapter *adapter) | |||
529 | 526 | ||
530 | e1000_irq_disable(adapter); | 527 | e1000_irq_disable(adapter); |
531 | 528 | ||
532 | /* | 529 | /* Setting DOWN must be after irq_disable to prevent |
533 | * Setting DOWN must be after irq_disable to prevent | ||
534 | * a screaming interrupt. Setting DOWN also prevents | 530 | * a screaming interrupt. Setting DOWN also prevents |
535 | * tasks from rescheduling. | 531 | * tasks from rescheduling. |
536 | */ | 532 | */ |
@@ -627,14 +623,14 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
627 | * rounded up to the next 1KB and expressed in KB. Likewise, | 623 | * rounded up to the next 1KB and expressed in KB. Likewise, |
628 | * the Rx FIFO should be large enough to accommodate at least | 624 | * the Rx FIFO should be large enough to accommodate at least |
629 | * one full receive packet and is similarly rounded up and | 625 | * one full receive packet and is similarly rounded up and |
630 | * expressed in KB. */ | 626 | * expressed in KB. |
627 | */ | ||
631 | pba = er32(PBA); | 628 | pba = er32(PBA); |
632 | /* upper 16 bits has Tx packet buffer allocation size in KB */ | 629 | /* upper 16 bits has Tx packet buffer allocation size in KB */ |
633 | tx_space = pba >> 16; | 630 | tx_space = pba >> 16; |
634 | /* lower 16 bits has Rx packet buffer allocation size in KB */ | 631 | /* lower 16 bits has Rx packet buffer allocation size in KB */ |
635 | pba &= 0xffff; | 632 | pba &= 0xffff; |
636 | /* | 633 | /* the Tx fifo also stores 16 bytes of information about the Tx |
637 | * the tx fifo also stores 16 bytes of information about the tx | ||
638 | * but don't include ethernet FCS because hardware appends it | 634 | * but don't include ethernet FCS because hardware appends it |
639 | */ | 635 | */ |
640 | min_tx_space = (hw->max_frame_size + | 636 | min_tx_space = (hw->max_frame_size + |
@@ -649,7 +645,8 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
649 | 645 | ||
650 | /* If current Tx allocation is less than the min Tx FIFO size, | 646 | /* If current Tx allocation is less than the min Tx FIFO size, |
651 | * and the min Tx FIFO size is less than the current Rx FIFO | 647 | * and the min Tx FIFO size is less than the current Rx FIFO |
652 | * allocation, take space away from current Rx allocation */ | 648 | * allocation, take space away from current Rx allocation |
649 | */ | ||
653 | if (tx_space < min_tx_space && | 650 | if (tx_space < min_tx_space && |
654 | ((min_tx_space - tx_space) < pba)) { | 651 | ((min_tx_space - tx_space) < pba)) { |
655 | pba = pba - (min_tx_space - tx_space); | 652 | pba = pba - (min_tx_space - tx_space); |
@@ -663,8 +660,9 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
663 | break; | 660 | break; |
664 | } | 661 | } |
665 | 662 | ||
666 | /* if short on rx space, rx wins and must trump tx | 663 | /* if short on Rx space, Rx wins and must trump Tx |
667 | * adjustment or use Early Receive if available */ | 664 | * adjustment or use Early Receive if available |
665 | */ | ||
668 | if (pba < min_rx_space) | 666 | if (pba < min_rx_space) |
669 | pba = min_rx_space; | 667 | pba = min_rx_space; |
670 | } | 668 | } |
@@ -672,8 +670,7 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
672 | 670 | ||
673 | ew32(PBA, pba); | 671 | ew32(PBA, pba); |
674 | 672 | ||
675 | /* | 673 | /* flow control settings: |
676 | * flow control settings: | ||
677 | * The high water mark must be low enough to fit one full frame | 674 | * The high water mark must be low enough to fit one full frame |
678 | * (or the size used for early receive) above it in the Rx FIFO. | 675 | * (or the size used for early receive) above it in the Rx FIFO. |
679 | * Set it to the lower of: | 676 | * Set it to the lower of: |
@@ -707,7 +704,8 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
707 | u32 ctrl = er32(CTRL); | 704 | u32 ctrl = er32(CTRL); |
708 | /* clear phy power management bit if we are in gig only mode, | 705 | /* clear phy power management bit if we are in gig only mode, |
709 | * which if enabled will attempt negotiation to 100Mb, which | 706 | * which if enabled will attempt negotiation to 100Mb, which |
710 | * can cause a loss of link at power off or driver unload */ | 707 | * can cause a loss of link at power off or driver unload |
708 | */ | ||
711 | ctrl &= ~E1000_CTRL_SWDPIN3; | 709 | ctrl &= ~E1000_CTRL_SWDPIN3; |
712 | ew32(CTRL, ctrl); | 710 | ew32(CTRL, ctrl); |
713 | } | 711 | } |
@@ -808,9 +806,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev) | |||
808 | static netdev_features_t e1000_fix_features(struct net_device *netdev, | 806 | static netdev_features_t e1000_fix_features(struct net_device *netdev, |
809 | netdev_features_t features) | 807 | netdev_features_t features) |
810 | { | 808 | { |
811 | /* | 809 | /* Since there is no support for separate Rx/Tx vlan accel |
812 | * Since there is no support for separate rx/tx vlan accel | 810 | * enable/disable make sure Tx flag is always in same state as Rx. |
813 | * enable/disable make sure tx flag is always in same state as rx. | ||
814 | */ | 811 | */ |
815 | if (features & NETIF_F_HW_VLAN_RX) | 812 | if (features & NETIF_F_HW_VLAN_RX) |
816 | features |= NETIF_F_HW_VLAN_TX; | 813 | features |= NETIF_F_HW_VLAN_TX; |
@@ -1012,16 +1009,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1012 | if (err) | 1009 | if (err) |
1013 | goto err_sw_init; | 1010 | goto err_sw_init; |
1014 | 1011 | ||
1015 | /* | 1012 | /* there is a workaround being applied below that limits |
1016 | * there is a workaround being applied below that limits | ||
1017 | * 64-bit DMA addresses to 64-bit hardware. There are some | 1013 | * 64-bit DMA addresses to 64-bit hardware. There are some |
1018 | * 32-bit adapters that Tx hang when given 64-bit DMA addresses | 1014 | * 32-bit adapters that Tx hang when given 64-bit DMA addresses |
1019 | */ | 1015 | */ |
1020 | pci_using_dac = 0; | 1016 | pci_using_dac = 0; |
1021 | if ((hw->bus_type == e1000_bus_type_pcix) && | 1017 | if ((hw->bus_type == e1000_bus_type_pcix) && |
1022 | !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { | 1018 | !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { |
1023 | /* | 1019 | /* according to DMA-API-HOWTO, coherent calls will always |
1024 | * according to DMA-API-HOWTO, coherent calls will always | ||
1025 | * succeed if the set call did | 1020 | * succeed if the set call did |
1026 | */ | 1021 | */ |
1027 | dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); | 1022 | dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); |
@@ -1099,7 +1094,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1099 | } | 1094 | } |
1100 | 1095 | ||
1101 | /* before reading the EEPROM, reset the controller to | 1096 | /* before reading the EEPROM, reset the controller to |
1102 | * put the device in a known good starting state */ | 1097 | * put the device in a known good starting state |
1098 | */ | ||
1103 | 1099 | ||
1104 | e1000_reset_hw(hw); | 1100 | e1000_reset_hw(hw); |
1105 | 1101 | ||
@@ -1107,8 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1107 | if (e1000_validate_eeprom_checksum(hw) < 0) { | 1103 | if (e1000_validate_eeprom_checksum(hw) < 0) { |
1108 | e_err(probe, "The EEPROM Checksum Is Not Valid\n"); | 1104 | e_err(probe, "The EEPROM Checksum Is Not Valid\n"); |
1109 | e1000_dump_eeprom(adapter); | 1105 | e1000_dump_eeprom(adapter); |
1110 | /* | 1106 | /* set MAC address to all zeroes to invalidate and temporary |
1111 | * set MAC address to all zeroes to invalidate and temporary | ||
1112 | * disable this device for the user. This blocks regular | 1107 | * disable this device for the user. This blocks regular |
1113 | * traffic while still permitting ethtool ioctls from reaching | 1108 | * traffic while still permitting ethtool ioctls from reaching |
1114 | * the hardware as well as allowing the user to run the | 1109 | * the hardware as well as allowing the user to run the |
@@ -1169,7 +1164,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1169 | 1164 | ||
1170 | /* now that we have the eeprom settings, apply the special cases | 1165 | /* now that we have the eeprom settings, apply the special cases |
1171 | * where the eeprom may be wrong or the board simply won't support | 1166 | * where the eeprom may be wrong or the board simply won't support |
1172 | * wake on lan on a particular port */ | 1167 | * wake on lan on a particular port |
1168 | */ | ||
1173 | switch (pdev->device) { | 1169 | switch (pdev->device) { |
1174 | case E1000_DEV_ID_82546GB_PCIE: | 1170 | case E1000_DEV_ID_82546GB_PCIE: |
1175 | adapter->eeprom_wol = 0; | 1171 | adapter->eeprom_wol = 0; |
@@ -1177,7 +1173,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1177 | case E1000_DEV_ID_82546EB_FIBER: | 1173 | case E1000_DEV_ID_82546EB_FIBER: |
1178 | case E1000_DEV_ID_82546GB_FIBER: | 1174 | case E1000_DEV_ID_82546GB_FIBER: |
1179 | /* Wake events only supported on port A for dual fiber | 1175 | /* Wake events only supported on port A for dual fiber |
1180 | * regardless of eeprom setting */ | 1176 | * regardless of eeprom setting |
1177 | */ | ||
1181 | if (er32(STATUS) & E1000_STATUS_FUNC_1) | 1178 | if (er32(STATUS) & E1000_STATUS_FUNC_1) |
1182 | adapter->eeprom_wol = 0; | 1179 | adapter->eeprom_wol = 0; |
1183 | break; | 1180 | break; |
@@ -1270,7 +1267,6 @@ err_pci_reg: | |||
1270 | * Hot-Plug event, or because the driver is going to be removed from | 1267 | * Hot-Plug event, or because the driver is going to be removed from |
1271 | * memory. | 1268 | * memory. |
1272 | **/ | 1269 | **/ |
1273 | |||
1274 | static void e1000_remove(struct pci_dev *pdev) | 1270 | static void e1000_remove(struct pci_dev *pdev) |
1275 | { | 1271 | { |
1276 | struct net_device *netdev = pci_get_drvdata(pdev); | 1272 | struct net_device *netdev = pci_get_drvdata(pdev); |
@@ -1306,7 +1302,6 @@ static void e1000_remove(struct pci_dev *pdev) | |||
1306 | * e1000_sw_init initializes the Adapter private data structure. | 1302 | * e1000_sw_init initializes the Adapter private data structure. |
1307 | * e1000_init_hw_struct MUST be called before this function | 1303 | * e1000_init_hw_struct MUST be called before this function |
1308 | **/ | 1304 | **/ |
1309 | |||
1310 | static int e1000_sw_init(struct e1000_adapter *adapter) | 1305 | static int e1000_sw_init(struct e1000_adapter *adapter) |
1311 | { | 1306 | { |
1312 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; | 1307 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
@@ -1337,7 +1332,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter) | |||
1337 | * We allocate one ring per queue at run-time since we don't know the | 1332 | * We allocate one ring per queue at run-time since we don't know the |
1338 | * number of queues at compile-time. | 1333 | * number of queues at compile-time. |
1339 | **/ | 1334 | **/ |
1340 | |||
1341 | static int e1000_alloc_queues(struct e1000_adapter *adapter) | 1335 | static int e1000_alloc_queues(struct e1000_adapter *adapter) |
1342 | { | 1336 | { |
1343 | adapter->tx_ring = kcalloc(adapter->num_tx_queues, | 1337 | adapter->tx_ring = kcalloc(adapter->num_tx_queues, |
@@ -1367,7 +1361,6 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter) | |||
1367 | * handler is registered with the OS, the watchdog task is started, | 1361 | * handler is registered with the OS, the watchdog task is started, |
1368 | * and the stack is notified that the interface is ready. | 1362 | * and the stack is notified that the interface is ready. |
1369 | **/ | 1363 | **/ |
1370 | |||
1371 | static int e1000_open(struct net_device *netdev) | 1364 | static int e1000_open(struct net_device *netdev) |
1372 | { | 1365 | { |
1373 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1366 | struct e1000_adapter *adapter = netdev_priv(netdev); |
@@ -1401,7 +1394,8 @@ static int e1000_open(struct net_device *netdev) | |||
1401 | /* before we allocate an interrupt, we must be ready to handle it. | 1394 | /* before we allocate an interrupt, we must be ready to handle it. |
1402 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt | 1395 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt |
1403 | * as soon as we call pci_request_irq, so we have to setup our | 1396 | * as soon as we call pci_request_irq, so we have to setup our |
1404 | * clean_rx handler before we do so. */ | 1397 | * clean_rx handler before we do so. |
1398 | */ | ||
1405 | e1000_configure(adapter); | 1399 | e1000_configure(adapter); |
1406 | 1400 | ||
1407 | err = e1000_request_irq(adapter); | 1401 | err = e1000_request_irq(adapter); |
@@ -1444,7 +1438,6 @@ err_setup_tx: | |||
1444 | * needs to be disabled. A global MAC reset is issued to stop the | 1438 | * needs to be disabled. A global MAC reset is issued to stop the |
1445 | * hardware, and all transmit and receive resources are freed. | 1439 | * hardware, and all transmit and receive resources are freed. |
1446 | **/ | 1440 | **/ |
1447 | |||
1448 | static int e1000_close(struct net_device *netdev) | 1441 | static int e1000_close(struct net_device *netdev) |
1449 | { | 1442 | { |
1450 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1443 | struct e1000_adapter *adapter = netdev_priv(netdev); |
@@ -1459,10 +1452,11 @@ static int e1000_close(struct net_device *netdev) | |||
1459 | e1000_free_all_rx_resources(adapter); | 1452 | e1000_free_all_rx_resources(adapter); |
1460 | 1453 | ||
1461 | /* kill manageability vlan ID if supported, but not if a vlan with | 1454 | /* kill manageability vlan ID if supported, but not if a vlan with |
1462 | * the same ID is registered on the host OS (let 8021q kill it) */ | 1455 | * the same ID is registered on the host OS (let 8021q kill it) |
1456 | */ | ||
1463 | if ((hw->mng_cookie.status & | 1457 | if ((hw->mng_cookie.status & |
1464 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 1458 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
1465 | !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { | 1459 | !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { |
1466 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | 1460 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); |
1467 | } | 1461 | } |
1468 | 1462 | ||
@@ -1483,7 +1477,8 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, | |||
1483 | unsigned long end = begin + len; | 1477 | unsigned long end = begin + len; |
1484 | 1478 | ||
1485 | /* First rev 82545 and 82546 need to not allow any memory | 1479 | /* First rev 82545 and 82546 need to not allow any memory |
1486 | * write location to cross 64k boundary due to errata 23 */ | 1480 | * write location to cross 64k boundary due to errata 23 |
1481 | */ | ||
1487 | if (hw->mac_type == e1000_82545 || | 1482 | if (hw->mac_type == e1000_82545 || |
1488 | hw->mac_type == e1000_ce4100 || | 1483 | hw->mac_type == e1000_ce4100 || |
1489 | hw->mac_type == e1000_82546) { | 1484 | hw->mac_type == e1000_82546) { |
@@ -1500,7 +1495,6 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, | |||
1500 | * | 1495 | * |
1501 | * Return 0 on success, negative on failure | 1496 | * Return 0 on success, negative on failure |
1502 | **/ | 1497 | **/ |
1503 | |||
1504 | static int e1000_setup_tx_resources(struct e1000_adapter *adapter, | 1498 | static int e1000_setup_tx_resources(struct e1000_adapter *adapter, |
1505 | struct e1000_tx_ring *txdr) | 1499 | struct e1000_tx_ring *txdr) |
1506 | { | 1500 | { |
@@ -1574,7 +1568,6 @@ setup_tx_desc_die: | |||
1574 | * | 1568 | * |
1575 | * Return 0 on success, negative on failure | 1569 | * Return 0 on success, negative on failure |
1576 | **/ | 1570 | **/ |
1577 | |||
1578 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) | 1571 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) |
1579 | { | 1572 | { |
1580 | int i, err = 0; | 1573 | int i, err = 0; |
@@ -1599,7 +1592,6 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) | |||
1599 | * | 1592 | * |
1600 | * Configure the Tx unit of the MAC after a reset. | 1593 | * Configure the Tx unit of the MAC after a reset. |
1601 | **/ | 1594 | **/ |
1602 | |||
1603 | static void e1000_configure_tx(struct e1000_adapter *adapter) | 1595 | static void e1000_configure_tx(struct e1000_adapter *adapter) |
1604 | { | 1596 | { |
1605 | u64 tdba; | 1597 | u64 tdba; |
@@ -1620,8 +1612,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
1620 | ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); | 1612 | ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); |
1621 | ew32(TDT, 0); | 1613 | ew32(TDT, 0); |
1622 | ew32(TDH, 0); | 1614 | ew32(TDH, 0); |
1623 | adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH); | 1615 | adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? |
1624 | adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT); | 1616 | E1000_TDH : E1000_82542_TDH); |
1617 | adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? | ||
1618 | E1000_TDT : E1000_82542_TDT); | ||
1625 | break; | 1619 | break; |
1626 | } | 1620 | } |
1627 | 1621 | ||
@@ -1676,7 +1670,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
1676 | adapter->txd_cmd |= E1000_TXD_CMD_RS; | 1670 | adapter->txd_cmd |= E1000_TXD_CMD_RS; |
1677 | 1671 | ||
1678 | /* Cache if we're 82544 running in PCI-X because we'll | 1672 | /* Cache if we're 82544 running in PCI-X because we'll |
1679 | * need this to apply a workaround later in the send path. */ | 1673 | * need this to apply a workaround later in the send path. |
1674 | */ | ||
1680 | if (hw->mac_type == e1000_82544 && | 1675 | if (hw->mac_type == e1000_82544 && |
1681 | hw->bus_type == e1000_bus_type_pcix) | 1676 | hw->bus_type == e1000_bus_type_pcix) |
1682 | adapter->pcix_82544 = true; | 1677 | adapter->pcix_82544 = true; |
@@ -1692,7 +1687,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
1692 | * | 1687 | * |
1693 | * Returns 0 on success, negative on failure | 1688 | * Returns 0 on success, negative on failure |
1694 | **/ | 1689 | **/ |
1695 | |||
1696 | static int e1000_setup_rx_resources(struct e1000_adapter *adapter, | 1690 | static int e1000_setup_rx_resources(struct e1000_adapter *adapter, |
1697 | struct e1000_rx_ring *rxdr) | 1691 | struct e1000_rx_ring *rxdr) |
1698 | { | 1692 | { |
@@ -1771,7 +1765,6 @@ setup_rx_desc_die: | |||
1771 | * | 1765 | * |
1772 | * Return 0 on success, negative on failure | 1766 | * Return 0 on success, negative on failure |
1773 | **/ | 1767 | **/ |
1774 | |||
1775 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) | 1768 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) |
1776 | { | 1769 | { |
1777 | int i, err = 0; | 1770 | int i, err = 0; |
@@ -1840,7 +1833,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1840 | /* This is useful for sniffing bad packets. */ | 1833 | /* This is useful for sniffing bad packets. */ |
1841 | if (adapter->netdev->features & NETIF_F_RXALL) { | 1834 | if (adapter->netdev->features & NETIF_F_RXALL) { |
1842 | /* UPE and MPE will be handled by normal PROMISC logic | 1835 | /* UPE and MPE will be handled by normal PROMISC logic |
1843 | * in e1000e_set_rx_mode */ | 1836 | * in e1000e_set_rx_mode |
1837 | */ | ||
1844 | rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ | 1838 | rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ |
1845 | E1000_RCTL_BAM | /* RX All Bcast Pkts */ | 1839 | E1000_RCTL_BAM | /* RX All Bcast Pkts */ |
1846 | E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ | 1840 | E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ |
@@ -1862,7 +1856,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1862 | * | 1856 | * |
1863 | * Configure the Rx unit of the MAC after a reset. | 1857 | * Configure the Rx unit of the MAC after a reset. |
1864 | **/ | 1858 | **/ |
1865 | |||
1866 | static void e1000_configure_rx(struct e1000_adapter *adapter) | 1859 | static void e1000_configure_rx(struct e1000_adapter *adapter) |
1867 | { | 1860 | { |
1868 | u64 rdba; | 1861 | u64 rdba; |
@@ -1895,7 +1888,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
1895 | } | 1888 | } |
1896 | 1889 | ||
1897 | /* Setup the HW Rx Head and Tail Descriptor Pointers and | 1890 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
1898 | * the Base and Length of the Rx Descriptor Ring */ | 1891 | * the Base and Length of the Rx Descriptor Ring |
1892 | */ | ||
1899 | switch (adapter->num_rx_queues) { | 1893 | switch (adapter->num_rx_queues) { |
1900 | case 1: | 1894 | case 1: |
1901 | default: | 1895 | default: |
@@ -1905,8 +1899,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
1905 | ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); | 1899 | ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); |
1906 | ew32(RDT, 0); | 1900 | ew32(RDT, 0); |
1907 | ew32(RDH, 0); | 1901 | ew32(RDH, 0); |
1908 | adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); | 1902 | adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? |
1909 | adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT); | 1903 | E1000_RDH : E1000_82542_RDH); |
1904 | adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? | ||
1905 | E1000_RDT : E1000_82542_RDT); | ||
1910 | break; | 1906 | break; |
1911 | } | 1907 | } |
1912 | 1908 | ||
@@ -1932,7 +1928,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
1932 | * | 1928 | * |
1933 | * Free all transmit software resources | 1929 | * Free all transmit software resources |
1934 | **/ | 1930 | **/ |
1935 | |||
1936 | static void e1000_free_tx_resources(struct e1000_adapter *adapter, | 1931 | static void e1000_free_tx_resources(struct e1000_adapter *adapter, |
1937 | struct e1000_tx_ring *tx_ring) | 1932 | struct e1000_tx_ring *tx_ring) |
1938 | { | 1933 | { |
@@ -1955,7 +1950,6 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter, | |||
1955 | * | 1950 | * |
1956 | * Free all transmit software resources | 1951 | * Free all transmit software resources |
1957 | **/ | 1952 | **/ |
1958 | |||
1959 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter) | 1953 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter) |
1960 | { | 1954 | { |
1961 | int i; | 1955 | int i; |
@@ -1990,7 +1984,6 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, | |||
1990 | * @adapter: board private structure | 1984 | * @adapter: board private structure |
1991 | * @tx_ring: ring to be cleaned | 1985 | * @tx_ring: ring to be cleaned |
1992 | **/ | 1986 | **/ |
1993 | |||
1994 | static void e1000_clean_tx_ring(struct e1000_adapter *adapter, | 1987 | static void e1000_clean_tx_ring(struct e1000_adapter *adapter, |
1995 | struct e1000_tx_ring *tx_ring) | 1988 | struct e1000_tx_ring *tx_ring) |
1996 | { | 1989 | { |
@@ -2026,7 +2019,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter, | |||
2026 | * e1000_clean_all_tx_rings - Free Tx Buffers for all queues | 2019 | * e1000_clean_all_tx_rings - Free Tx Buffers for all queues |
2027 | * @adapter: board private structure | 2020 | * @adapter: board private structure |
2028 | **/ | 2021 | **/ |
2029 | |||
2030 | static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) | 2022 | static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) |
2031 | { | 2023 | { |
2032 | int i; | 2024 | int i; |
@@ -2042,7 +2034,6 @@ static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) | |||
2042 | * | 2034 | * |
2043 | * Free all receive software resources | 2035 | * Free all receive software resources |
2044 | **/ | 2036 | **/ |
2045 | |||
2046 | static void e1000_free_rx_resources(struct e1000_adapter *adapter, | 2037 | static void e1000_free_rx_resources(struct e1000_adapter *adapter, |
2047 | struct e1000_rx_ring *rx_ring) | 2038 | struct e1000_rx_ring *rx_ring) |
2048 | { | 2039 | { |
@@ -2065,7 +2056,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter, | |||
2065 | * | 2056 | * |
2066 | * Free all receive software resources | 2057 | * Free all receive software resources |
2067 | **/ | 2058 | **/ |
2068 | |||
2069 | void e1000_free_all_rx_resources(struct e1000_adapter *adapter) | 2059 | void e1000_free_all_rx_resources(struct e1000_adapter *adapter) |
2070 | { | 2060 | { |
2071 | int i; | 2061 | int i; |
@@ -2079,7 +2069,6 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter) | |||
2079 | * @adapter: board private structure | 2069 | * @adapter: board private structure |
2080 | * @rx_ring: ring to free buffers from | 2070 | * @rx_ring: ring to free buffers from |
2081 | **/ | 2071 | **/ |
2082 | |||
2083 | static void e1000_clean_rx_ring(struct e1000_adapter *adapter, | 2072 | static void e1000_clean_rx_ring(struct e1000_adapter *adapter, |
2084 | struct e1000_rx_ring *rx_ring) | 2073 | struct e1000_rx_ring *rx_ring) |
2085 | { | 2074 | { |
@@ -2138,7 +2127,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, | |||
2138 | * e1000_clean_all_rx_rings - Free Rx Buffers for all queues | 2127 | * e1000_clean_all_rx_rings - Free Rx Buffers for all queues |
2139 | * @adapter: board private structure | 2128 | * @adapter: board private structure |
2140 | **/ | 2129 | **/ |
2141 | |||
2142 | static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) | 2130 | static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) |
2143 | { | 2131 | { |
2144 | int i; | 2132 | int i; |
@@ -2198,7 +2186,6 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter) | |||
2198 | * | 2186 | * |
2199 | * Returns 0 on success, negative on failure | 2187 | * Returns 0 on success, negative on failure |
2200 | **/ | 2188 | **/ |
2201 | |||
2202 | static int e1000_set_mac(struct net_device *netdev, void *p) | 2189 | static int e1000_set_mac(struct net_device *netdev, void *p) |
2203 | { | 2190 | { |
2204 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2191 | struct e1000_adapter *adapter = netdev_priv(netdev); |
@@ -2233,7 +2220,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p) | |||
2233 | * responsible for configuring the hardware for proper unicast, multicast, | 2220 | * responsible for configuring the hardware for proper unicast, multicast, |
2234 | * promiscuous mode, and all-multi behavior. | 2221 | * promiscuous mode, and all-multi behavior. |
2235 | **/ | 2222 | **/ |
2236 | |||
2237 | static void e1000_set_rx_mode(struct net_device *netdev) | 2223 | static void e1000_set_rx_mode(struct net_device *netdev) |
2238 | { | 2224 | { |
2239 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2225 | struct e1000_adapter *adapter = netdev_priv(netdev); |
@@ -2317,10 +2303,10 @@ static void e1000_set_rx_mode(struct net_device *netdev) | |||
2317 | } | 2303 | } |
2318 | 2304 | ||
2319 | /* write the hash table completely, write from bottom to avoid | 2305 | /* write the hash table completely, write from bottom to avoid |
2320 | * both stupid write combining chipsets, and flushing each write */ | 2306 | * both stupid write combining chipsets, and flushing each write |
2307 | */ | ||
2321 | for (i = mta_reg_count - 1; i >= 0 ; i--) { | 2308 | for (i = mta_reg_count - 1; i >= 0 ; i--) { |
2322 | /* | 2309 | /* If we are on an 82544 has an errata where writing odd |
2323 | * If we are on an 82544 has an errata where writing odd | ||
2324 | * offsets overwrites the previous even offset, but writing | 2310 | * offsets overwrites the previous even offset, but writing |
2325 | * backwards over the range solves the issue by always | 2311 | * backwards over the range solves the issue by always |
2326 | * writing the odd offset first | 2312 | * writing the odd offset first |
@@ -2458,8 +2444,8 @@ static void e1000_watchdog(struct work_struct *work) | |||
2458 | bool txb2b = true; | 2444 | bool txb2b = true; |
2459 | /* update snapshot of PHY registers on LSC */ | 2445 | /* update snapshot of PHY registers on LSC */ |
2460 | e1000_get_speed_and_duplex(hw, | 2446 | e1000_get_speed_and_duplex(hw, |
2461 | &adapter->link_speed, | 2447 | &adapter->link_speed, |
2462 | &adapter->link_duplex); | 2448 | &adapter->link_duplex); |
2463 | 2449 | ||
2464 | ctrl = er32(CTRL); | 2450 | ctrl = er32(CTRL); |
2465 | pr_info("%s NIC Link is Up %d Mbps %s, " | 2451 | pr_info("%s NIC Link is Up %d Mbps %s, " |
@@ -2533,7 +2519,8 @@ link_up: | |||
2533 | /* We've lost link, so the controller stops DMA, | 2519 | /* We've lost link, so the controller stops DMA, |
2534 | * but we've got queued Tx work that's never going | 2520 | * but we've got queued Tx work that's never going |
2535 | * to get done, so reset controller to flush Tx. | 2521 | * to get done, so reset controller to flush Tx. |
2536 | * (Do the reset outside of interrupt context). */ | 2522 | * (Do the reset outside of interrupt context). |
2523 | */ | ||
2537 | adapter->tx_timeout_count++; | 2524 | adapter->tx_timeout_count++; |
2538 | schedule_work(&adapter->reset_task); | 2525 | schedule_work(&adapter->reset_task); |
2539 | /* exit immediately since reset is imminent */ | 2526 | /* exit immediately since reset is imminent */ |
@@ -2543,8 +2530,7 @@ link_up: | |||
2543 | 2530 | ||
2544 | /* Simple mode for Interrupt Throttle Rate (ITR) */ | 2531 | /* Simple mode for Interrupt Throttle Rate (ITR) */ |
2545 | if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { | 2532 | if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { |
2546 | /* | 2533 | /* Symmetric Tx/Rx gets a reduced ITR=2000; |
2547 | * Symmetric Tx/Rx gets a reduced ITR=2000; | ||
2548 | * Total asymmetrical Tx or Rx gets ITR=8000; | 2534 | * Total asymmetrical Tx or Rx gets ITR=8000; |
2549 | * everyone else is between 2000-8000. | 2535 | * everyone else is between 2000-8000. |
2550 | */ | 2536 | */ |
@@ -2659,18 +2645,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter) | |||
2659 | goto set_itr_now; | 2645 | goto set_itr_now; |
2660 | } | 2646 | } |
2661 | 2647 | ||
2662 | adapter->tx_itr = e1000_update_itr(adapter, | 2648 | adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, |
2663 | adapter->tx_itr, | 2649 | adapter->total_tx_packets, |
2664 | adapter->total_tx_packets, | 2650 | adapter->total_tx_bytes); |
2665 | adapter->total_tx_bytes); | ||
2666 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | 2651 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ |
2667 | if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) | 2652 | if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) |
2668 | adapter->tx_itr = low_latency; | 2653 | adapter->tx_itr = low_latency; |
2669 | 2654 | ||
2670 | adapter->rx_itr = e1000_update_itr(adapter, | 2655 | adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, |
2671 | adapter->rx_itr, | 2656 | adapter->total_rx_packets, |
2672 | adapter->total_rx_packets, | 2657 | adapter->total_rx_bytes); |
2673 | adapter->total_rx_bytes); | ||
2674 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | 2658 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ |
2675 | if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) | 2659 | if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) |
2676 | adapter->rx_itr = low_latency; | 2660 | adapter->rx_itr = low_latency; |
@@ -2696,10 +2680,11 @@ set_itr_now: | |||
2696 | if (new_itr != adapter->itr) { | 2680 | if (new_itr != adapter->itr) { |
2697 | /* this attempts to bias the interrupt rate towards Bulk | 2681 | /* this attempts to bias the interrupt rate towards Bulk |
2698 | * by adding intermediate steps when interrupt rate is | 2682 | * by adding intermediate steps when interrupt rate is |
2699 | * increasing */ | 2683 | * increasing |
2684 | */ | ||
2700 | new_itr = new_itr > adapter->itr ? | 2685 | new_itr = new_itr > adapter->itr ? |
2701 | min(adapter->itr + (new_itr >> 2), new_itr) : | 2686 | min(adapter->itr + (new_itr >> 2), new_itr) : |
2702 | new_itr; | 2687 | new_itr; |
2703 | adapter->itr = new_itr; | 2688 | adapter->itr = new_itr; |
2704 | ew32(ITR, 1000000000 / (new_itr * 256)); | 2689 | ew32(ITR, 1000000000 / (new_itr * 256)); |
2705 | } | 2690 | } |
@@ -2861,7 +2846,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
2861 | /* Workaround for Controller erratum -- | 2846 | /* Workaround for Controller erratum -- |
2862 | * descriptor for non-tso packet in a linear SKB that follows a | 2847 | * descriptor for non-tso packet in a linear SKB that follows a |
2863 | * tso gets written back prematurely before the data is fully | 2848 | * tso gets written back prematurely before the data is fully |
2864 | * DMA'd to the controller */ | 2849 | * DMA'd to the controller |
2850 | */ | ||
2865 | if (!skb->data_len && tx_ring->last_tx_tso && | 2851 | if (!skb->data_len && tx_ring->last_tx_tso && |
2866 | !skb_is_gso(skb)) { | 2852 | !skb_is_gso(skb)) { |
2867 | tx_ring->last_tx_tso = false; | 2853 | tx_ring->last_tx_tso = false; |
@@ -2869,7 +2855,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
2869 | } | 2855 | } |
2870 | 2856 | ||
2871 | /* Workaround for premature desc write-backs | 2857 | /* Workaround for premature desc write-backs |
2872 | * in TSO mode. Append 4-byte sentinel desc */ | 2858 | * in TSO mode. Append 4-byte sentinel desc |
2859 | */ | ||
2873 | if (unlikely(mss && !nr_frags && size == len && size > 8)) | 2860 | if (unlikely(mss && !nr_frags && size == len && size > 8)) |
2874 | size -= 4; | 2861 | size -= 4; |
2875 | /* work-around for errata 10 and it applies | 2862 | /* work-around for errata 10 and it applies |
@@ -2882,7 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
2882 | size = 2015; | 2869 | size = 2015; |
2883 | 2870 | ||
2884 | /* Workaround for potential 82544 hang in PCI-X. Avoid | 2871 | /* Workaround for potential 82544 hang in PCI-X. Avoid |
2885 | * terminating buffers within evenly-aligned dwords. */ | 2872 | * terminating buffers within evenly-aligned dwords. |
2873 | */ | ||
2886 | if (unlikely(adapter->pcix_82544 && | 2874 | if (unlikely(adapter->pcix_82544 && |
2887 | !((unsigned long)(skb->data + offset + size - 1) & 4) && | 2875 | !((unsigned long)(skb->data + offset + size - 1) & 4) && |
2888 | size > 4)) | 2876 | size > 4)) |
@@ -2894,7 +2882,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
2894 | buffer_info->mapped_as_page = false; | 2882 | buffer_info->mapped_as_page = false; |
2895 | buffer_info->dma = dma_map_single(&pdev->dev, | 2883 | buffer_info->dma = dma_map_single(&pdev->dev, |
2896 | skb->data + offset, | 2884 | skb->data + offset, |
2897 | size, DMA_TO_DEVICE); | 2885 | size, DMA_TO_DEVICE); |
2898 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) | 2886 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) |
2899 | goto dma_error; | 2887 | goto dma_error; |
2900 | buffer_info->next_to_watch = i; | 2888 | buffer_info->next_to_watch = i; |
@@ -2925,12 +2913,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
2925 | buffer_info = &tx_ring->buffer_info[i]; | 2913 | buffer_info = &tx_ring->buffer_info[i]; |
2926 | size = min(len, max_per_txd); | 2914 | size = min(len, max_per_txd); |
2927 | /* Workaround for premature desc write-backs | 2915 | /* Workaround for premature desc write-backs |
2928 | * in TSO mode. Append 4-byte sentinel desc */ | 2916 | * in TSO mode. Append 4-byte sentinel desc |
2929 | if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) | 2917 | */ |
2918 | if (unlikely(mss && f == (nr_frags-1) && | ||
2919 | size == len && size > 8)) | ||
2930 | size -= 4; | 2920 | size -= 4; |
2931 | /* Workaround for potential 82544 hang in PCI-X. | 2921 | /* Workaround for potential 82544 hang in PCI-X. |
2932 | * Avoid terminating buffers within evenly-aligned | 2922 | * Avoid terminating buffers within evenly-aligned |
2933 | * dwords. */ | 2923 | * dwords. |
2924 | */ | ||
2934 | bufend = (unsigned long) | 2925 | bufend = (unsigned long) |
2935 | page_to_phys(skb_frag_page(frag)); | 2926 | page_to_phys(skb_frag_page(frag)); |
2936 | bufend += offset + size - 1; | 2927 | bufend += offset + size - 1; |
@@ -2994,7 +2985,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, | |||
2994 | 2985 | ||
2995 | if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { | 2986 | if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { |
2996 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | | 2987 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | |
2997 | E1000_TXD_CMD_TSE; | 2988 | E1000_TXD_CMD_TSE; |
2998 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; | 2989 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; |
2999 | 2990 | ||
3000 | if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) | 2991 | if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) |
@@ -3035,13 +3026,15 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, | |||
3035 | /* Force memory writes to complete before letting h/w | 3026 | /* Force memory writes to complete before letting h/w |
3036 | * know there are new descriptors to fetch. (Only | 3027 | * know there are new descriptors to fetch. (Only |
3037 | * applicable for weak-ordered memory model archs, | 3028 | * applicable for weak-ordered memory model archs, |
3038 | * such as IA-64). */ | 3029 | * such as IA-64). |
3030 | */ | ||
3039 | wmb(); | 3031 | wmb(); |
3040 | 3032 | ||
3041 | tx_ring->next_to_use = i; | 3033 | tx_ring->next_to_use = i; |
3042 | writel(i, hw->hw_addr + tx_ring->tdt); | 3034 | writel(i, hw->hw_addr + tx_ring->tdt); |
3043 | /* we need this if more than one processor can write to our tail | 3035 | /* we need this if more than one processor can write to our tail |
3044 | * at a time, it syncronizes IO on IA64/Altix systems */ | 3036 | * at a time, it synchronizes IO on IA64/Altix systems |
3037 | */ | ||
3045 | mmiowb(); | 3038 | mmiowb(); |
3046 | } | 3039 | } |
3047 | 3040 | ||
@@ -3090,11 +3083,13 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) | |||
3090 | netif_stop_queue(netdev); | 3083 | netif_stop_queue(netdev); |
3091 | /* Herbert's original patch had: | 3084 | /* Herbert's original patch had: |
3092 | * smp_mb__after_netif_stop_queue(); | 3085 | * smp_mb__after_netif_stop_queue(); |
3093 | * but since that doesn't exist yet, just open code it. */ | 3086 | * but since that doesn't exist yet, just open code it. |
3087 | */ | ||
3094 | smp_mb(); | 3088 | smp_mb(); |
3095 | 3089 | ||
3096 | /* We need to check again in a case another CPU has just | 3090 | /* We need to check again in a case another CPU has just |
3097 | * made room available. */ | 3091 | * made room available. |
3092 | */ | ||
3098 | if (likely(E1000_DESC_UNUSED(tx_ring) < size)) | 3093 | if (likely(E1000_DESC_UNUSED(tx_ring) < size)) |
3099 | return -EBUSY; | 3094 | return -EBUSY; |
3100 | 3095 | ||
@@ -3105,7 +3100,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) | |||
3105 | } | 3100 | } |
3106 | 3101 | ||
3107 | static int e1000_maybe_stop_tx(struct net_device *netdev, | 3102 | static int e1000_maybe_stop_tx(struct net_device *netdev, |
3108 | struct e1000_tx_ring *tx_ring, int size) | 3103 | struct e1000_tx_ring *tx_ring, int size) |
3109 | { | 3104 | { |
3110 | if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) | 3105 | if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) |
3111 | return 0; | 3106 | return 0; |
@@ -3129,10 +3124,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
3129 | int tso; | 3124 | int tso; |
3130 | unsigned int f; | 3125 | unsigned int f; |
3131 | 3126 | ||
3132 | /* This goes back to the question of how to logically map a tx queue | 3127 | /* This goes back to the question of how to logically map a Tx queue |
3133 | * to a flow. Right now, performance is impacted slightly negatively | 3128 | * to a flow. Right now, performance is impacted slightly negatively |
3134 | * if using multiple tx queues. If the stack breaks away from a | 3129 | * if using multiple Tx queues. If the stack breaks away from a |
3135 | * single qdisc implementation, we can look at this again. */ | 3130 | * single qdisc implementation, we can look at this again. |
3131 | */ | ||
3136 | tx_ring = adapter->tx_ring; | 3132 | tx_ring = adapter->tx_ring; |
3137 | 3133 | ||
3138 | if (unlikely(skb->len <= 0)) { | 3134 | if (unlikely(skb->len <= 0)) { |
@@ -3157,7 +3153,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
3157 | * initiating the DMA for each buffer. The calc is: | 3153 | * initiating the DMA for each buffer. The calc is: |
3158 | * 4 = ceil(buffer len/mss). To make sure we don't | 3154 | * 4 = ceil(buffer len/mss). To make sure we don't |
3159 | * overrun the FIFO, adjust the max buffer len if mss | 3155 | * overrun the FIFO, adjust the max buffer len if mss |
3160 | * drops. */ | 3156 | * drops. |
3157 | */ | ||
3161 | if (mss) { | 3158 | if (mss) { |
3162 | u8 hdr_len; | 3159 | u8 hdr_len; |
3163 | max_per_txd = min(mss << 2, max_per_txd); | 3160 | max_per_txd = min(mss << 2, max_per_txd); |
@@ -3173,8 +3170,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
3173 | * this hardware's requirements | 3170 | * this hardware's requirements |
3174 | * NOTE: this is a TSO only workaround | 3171 | * NOTE: this is a TSO only workaround |
3175 | * if end byte alignment not correct move us | 3172 | * if end byte alignment not correct move us |
3176 | * into the next dword */ | 3173 | * into the next dword |
3177 | if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) | 3174 | */ |
3175 | if ((unsigned long)(skb_tail_pointer(skb) - 1) | ||
3176 | & 4) | ||
3178 | break; | 3177 | break; |
3179 | /* fall through */ | 3178 | /* fall through */ |
3180 | pull_size = min((unsigned int)4, skb->data_len); | 3179 | pull_size = min((unsigned int)4, skb->data_len); |
@@ -3222,7 +3221,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
3222 | count += nr_frags; | 3221 | count += nr_frags; |
3223 | 3222 | ||
3224 | /* need: count + 2 desc gap to keep tail from touching | 3223 | /* need: count + 2 desc gap to keep tail from touching |
3225 | * head, otherwise try next time */ | 3224 | * head, otherwise try next time |
3225 | */ | ||
3226 | if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) | 3226 | if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) |
3227 | return NETDEV_TX_BUSY; | 3227 | return NETDEV_TX_BUSY; |
3228 | 3228 | ||
@@ -3261,7 +3261,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
3261 | tx_flags |= E1000_TX_FLAGS_NO_FCS; | 3261 | tx_flags |= E1000_TX_FLAGS_NO_FCS; |
3262 | 3262 | ||
3263 | count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, | 3263 | count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, |
3264 | nr_frags, mss); | 3264 | nr_frags, mss); |
3265 | 3265 | ||
3266 | if (count) { | 3266 | if (count) { |
3267 | netdev_sent_queue(netdev, skb->len); | 3267 | netdev_sent_queue(netdev, skb->len); |
@@ -3363,9 +3363,7 @@ static void e1000_dump(struct e1000_adapter *adapter) | |||
3363 | /* Print Registers */ | 3363 | /* Print Registers */ |
3364 | e1000_regdump(adapter); | 3364 | e1000_regdump(adapter); |
3365 | 3365 | ||
3366 | /* | 3366 | /* transmit dump */ |
3367 | * transmit dump | ||
3368 | */ | ||
3369 | pr_info("TX Desc ring0 dump\n"); | 3367 | pr_info("TX Desc ring0 dump\n"); |
3370 | 3368 | ||
3371 | /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) | 3369 | /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) |
@@ -3426,9 +3424,7 @@ static void e1000_dump(struct e1000_adapter *adapter) | |||
3426 | } | 3424 | } |
3427 | 3425 | ||
3428 | rx_ring_summary: | 3426 | rx_ring_summary: |
3429 | /* | 3427 | /* receive dump */ |
3430 | * receive dump | ||
3431 | */ | ||
3432 | pr_info("\nRX Desc ring dump\n"); | 3428 | pr_info("\nRX Desc ring dump\n"); |
3433 | 3429 | ||
3434 | /* Legacy Receive Descriptor Format | 3430 | /* Legacy Receive Descriptor Format |
@@ -3493,7 +3489,6 @@ exit: | |||
3493 | * e1000_tx_timeout - Respond to a Tx Hang | 3489 | * e1000_tx_timeout - Respond to a Tx Hang |
3494 | * @netdev: network interface device structure | 3490 | * @netdev: network interface device structure |
3495 | **/ | 3491 | **/ |
3496 | |||
3497 | static void e1000_tx_timeout(struct net_device *netdev) | 3492 | static void e1000_tx_timeout(struct net_device *netdev) |
3498 | { | 3493 | { |
3499 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3494 | struct e1000_adapter *adapter = netdev_priv(netdev); |
@@ -3521,7 +3516,6 @@ static void e1000_reset_task(struct work_struct *work) | |||
3521 | * Returns the address of the device statistics structure. | 3516 | * Returns the address of the device statistics structure. |
3522 | * The statistics are actually updated from the watchdog. | 3517 | * The statistics are actually updated from the watchdog. |
3523 | **/ | 3518 | **/ |
3524 | |||
3525 | static struct net_device_stats *e1000_get_stats(struct net_device *netdev) | 3519 | static struct net_device_stats *e1000_get_stats(struct net_device *netdev) |
3526 | { | 3520 | { |
3527 | /* only return the current stats */ | 3521 | /* only return the current stats */ |
@@ -3535,7 +3529,6 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev) | |||
3535 | * | 3529 | * |
3536 | * Returns 0 on success, negative on failure | 3530 | * Returns 0 on success, negative on failure |
3537 | **/ | 3531 | **/ |
3538 | |||
3539 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | 3532 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu) |
3540 | { | 3533 | { |
3541 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3534 | struct e1000_adapter *adapter = netdev_priv(netdev); |
@@ -3572,8 +3565,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3572 | * means we reserve 2 more, this pushes us to allocate from the next | 3565 | * means we reserve 2 more, this pushes us to allocate from the next |
3573 | * larger slab size. | 3566 | * larger slab size. |
3574 | * i.e. RXBUFFER_2048 --> size-4096 slab | 3567 | * i.e. RXBUFFER_2048 --> size-4096 slab |
3575 | * however with the new *_jumbo_rx* routines, jumbo receives will use | 3568 | * however with the new *_jumbo_rx* routines, jumbo receives will use |
3576 | * fragmented skbs */ | 3569 | * fragmented skbs |
3570 | */ | ||
3577 | 3571 | ||
3578 | if (max_frame <= E1000_RXBUFFER_2048) | 3572 | if (max_frame <= E1000_RXBUFFER_2048) |
3579 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; | 3573 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; |
@@ -3608,7 +3602,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3608 | * e1000_update_stats - Update the board statistics counters | 3602 | * e1000_update_stats - Update the board statistics counters |
3609 | * @adapter: board private structure | 3603 | * @adapter: board private structure |
3610 | **/ | 3604 | **/ |
3611 | |||
3612 | void e1000_update_stats(struct e1000_adapter *adapter) | 3605 | void e1000_update_stats(struct e1000_adapter *adapter) |
3613 | { | 3606 | { |
3614 | struct net_device *netdev = adapter->netdev; | 3607 | struct net_device *netdev = adapter->netdev; |
@@ -3619,8 +3612,7 @@ void e1000_update_stats(struct e1000_adapter *adapter) | |||
3619 | 3612 | ||
3620 | #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF | 3613 | #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF |
3621 | 3614 | ||
3622 | /* | 3615 | /* Prevent stats update while adapter is being reset, or if the pci |
3623 | * Prevent stats update while adapter is being reset, or if the pci | ||
3624 | * connection is down. | 3616 | * connection is down. |
3625 | */ | 3617 | */ |
3626 | if (adapter->link_speed == 0) | 3618 | if (adapter->link_speed == 0) |
@@ -3710,7 +3702,8 @@ void e1000_update_stats(struct e1000_adapter *adapter) | |||
3710 | /* Rx Errors */ | 3702 | /* Rx Errors */ |
3711 | 3703 | ||
3712 | /* RLEC on some newer hardware can be incorrect so build | 3704 | /* RLEC on some newer hardware can be incorrect so build |
3713 | * our own version based on RUC and ROC */ | 3705 | * our own version based on RUC and ROC |
3706 | */ | ||
3714 | netdev->stats.rx_errors = adapter->stats.rxerrc + | 3707 | netdev->stats.rx_errors = adapter->stats.rxerrc + |
3715 | adapter->stats.crcerrs + adapter->stats.algnerrc + | 3708 | adapter->stats.crcerrs + adapter->stats.algnerrc + |
3716 | adapter->stats.ruc + adapter->stats.roc + | 3709 | adapter->stats.ruc + adapter->stats.roc + |
@@ -3764,7 +3757,6 @@ void e1000_update_stats(struct e1000_adapter *adapter) | |||
3764 | * @irq: interrupt number | 3757 | * @irq: interrupt number |
3765 | * @data: pointer to a network interface device structure | 3758 | * @data: pointer to a network interface device structure |
3766 | **/ | 3759 | **/ |
3767 | |||
3768 | static irqreturn_t e1000_intr(int irq, void *data) | 3760 | static irqreturn_t e1000_intr(int irq, void *data) |
3769 | { | 3761 | { |
3770 | struct net_device *netdev = data; | 3762 | struct net_device *netdev = data; |
@@ -3775,8 +3767,7 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
3775 | if (unlikely((!icr))) | 3767 | if (unlikely((!icr))) |
3776 | return IRQ_NONE; /* Not our interrupt */ | 3768 | return IRQ_NONE; /* Not our interrupt */ |
3777 | 3769 | ||
3778 | /* | 3770 | /* we might have caused the interrupt, but the above |
3779 | * we might have caused the interrupt, but the above | ||
3780 | * read cleared it, and just in case the driver is | 3771 | * read cleared it, and just in case the driver is |
3781 | * down there is nothing to do so return handled | 3772 | * down there is nothing to do so return handled |
3782 | */ | 3773 | */ |
@@ -3802,7 +3793,8 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
3802 | __napi_schedule(&adapter->napi); | 3793 | __napi_schedule(&adapter->napi); |
3803 | } else { | 3794 | } else { |
3804 | /* this really should not happen! if it does it is basically a | 3795 | /* this really should not happen! if it does it is basically a |
3805 | * bug, but not a hard error, so enable ints and continue */ | 3796 | * bug, but not a hard error, so enable ints and continue |
3797 | */ | ||
3806 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 3798 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
3807 | e1000_irq_enable(adapter); | 3799 | e1000_irq_enable(adapter); |
3808 | } | 3800 | } |
@@ -3816,7 +3808,8 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
3816 | **/ | 3808 | **/ |
3817 | static int e1000_clean(struct napi_struct *napi, int budget) | 3809 | static int e1000_clean(struct napi_struct *napi, int budget) |
3818 | { | 3810 | { |
3819 | struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); | 3811 | struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, |
3812 | napi); | ||
3820 | int tx_clean_complete = 0, work_done = 0; | 3813 | int tx_clean_complete = 0, work_done = 0; |
3821 | 3814 | ||
3822 | tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); | 3815 | tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); |
@@ -3907,11 +3900,12 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3907 | 3900 | ||
3908 | if (adapter->detect_tx_hung) { | 3901 | if (adapter->detect_tx_hung) { |
3909 | /* Detect a transmit hang in hardware, this serializes the | 3902 | /* Detect a transmit hang in hardware, this serializes the |
3910 | * check with the clearing of time_stamp and movement of i */ | 3903 | * check with the clearing of time_stamp and movement of i |
3904 | */ | ||
3911 | adapter->detect_tx_hung = false; | 3905 | adapter->detect_tx_hung = false; |
3912 | if (tx_ring->buffer_info[eop].time_stamp && | 3906 | if (tx_ring->buffer_info[eop].time_stamp && |
3913 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + | 3907 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + |
3914 | (adapter->tx_timeout_factor * HZ)) && | 3908 | (adapter->tx_timeout_factor * HZ)) && |
3915 | !(er32(STATUS) & E1000_STATUS_TXOFF)) { | 3909 | !(er32(STATUS) & E1000_STATUS_TXOFF)) { |
3916 | 3910 | ||
3917 | /* detected Tx unit hang */ | 3911 | /* detected Tx unit hang */ |
@@ -3954,7 +3948,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
3954 | * @csum: receive descriptor csum field | 3948 | * @csum: receive descriptor csum field |
3955 | * @sk_buff: socket buffer with received data | 3949 | * @sk_buff: socket buffer with received data |
3956 | **/ | 3950 | **/ |
3957 | |||
3958 | static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, | 3951 | static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, |
3959 | u32 csum, struct sk_buff *skb) | 3952 | u32 csum, struct sk_buff *skb) |
3960 | { | 3953 | { |
@@ -3990,7 +3983,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, | |||
3990 | * e1000_consume_page - helper function | 3983 | * e1000_consume_page - helper function |
3991 | **/ | 3984 | **/ |
3992 | static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, | 3985 | static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, |
3993 | u16 length) | 3986 | u16 length) |
3994 | { | 3987 | { |
3995 | bi->page = NULL; | 3988 | bi->page = NULL; |
3996 | skb->len += length; | 3989 | skb->len += length; |
@@ -4086,11 +4079,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, | |||
4086 | if (TBI_ACCEPT(hw, status, rx_desc->errors, length, | 4079 | if (TBI_ACCEPT(hw, status, rx_desc->errors, length, |
4087 | last_byte)) { | 4080 | last_byte)) { |
4088 | spin_lock_irqsave(&adapter->stats_lock, | 4081 | spin_lock_irqsave(&adapter->stats_lock, |
4089 | irq_flags); | 4082 | irq_flags); |
4090 | e1000_tbi_adjust_stats(hw, &adapter->stats, | 4083 | e1000_tbi_adjust_stats(hw, &adapter->stats, |
4091 | length, mapped); | 4084 | length, mapped); |
4092 | spin_unlock_irqrestore(&adapter->stats_lock, | 4085 | spin_unlock_irqrestore(&adapter->stats_lock, |
4093 | irq_flags); | 4086 | irq_flags); |
4094 | length--; | 4087 | length--; |
4095 | } else { | 4088 | } else { |
4096 | if (netdev->features & NETIF_F_RXALL) | 4089 | if (netdev->features & NETIF_F_RXALL) |
@@ -4098,7 +4091,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, | |||
4098 | /* recycle both page and skb */ | 4091 | /* recycle both page and skb */ |
4099 | buffer_info->skb = skb; | 4092 | buffer_info->skb = skb; |
4100 | /* an error means any chain goes out the window | 4093 | /* an error means any chain goes out the window |
4101 | * too */ | 4094 | * too |
4095 | */ | ||
4102 | if (rx_ring->rx_skb_top) | 4096 | if (rx_ring->rx_skb_top) |
4103 | dev_kfree_skb(rx_ring->rx_skb_top); | 4097 | dev_kfree_skb(rx_ring->rx_skb_top); |
4104 | rx_ring->rx_skb_top = NULL; | 4098 | rx_ring->rx_skb_top = NULL; |
@@ -4114,7 +4108,7 @@ process_skb: | |||
4114 | /* this is the beginning of a chain */ | 4108 | /* this is the beginning of a chain */ |
4115 | rxtop = skb; | 4109 | rxtop = skb; |
4116 | skb_fill_page_desc(rxtop, 0, buffer_info->page, | 4110 | skb_fill_page_desc(rxtop, 0, buffer_info->page, |
4117 | 0, length); | 4111 | 0, length); |
4118 | } else { | 4112 | } else { |
4119 | /* this is the middle of a chain */ | 4113 | /* this is the middle of a chain */ |
4120 | skb_fill_page_desc(rxtop, | 4114 | skb_fill_page_desc(rxtop, |
@@ -4132,38 +4126,42 @@ process_skb: | |||
4132 | skb_shinfo(rxtop)->nr_frags, | 4126 | skb_shinfo(rxtop)->nr_frags, |
4133 | buffer_info->page, 0, length); | 4127 | buffer_info->page, 0, length); |
4134 | /* re-use the current skb, we only consumed the | 4128 | /* re-use the current skb, we only consumed the |
4135 | * page */ | 4129 | * page |
4130 | */ | ||
4136 | buffer_info->skb = skb; | 4131 | buffer_info->skb = skb; |
4137 | skb = rxtop; | 4132 | skb = rxtop; |
4138 | rxtop = NULL; | 4133 | rxtop = NULL; |
4139 | e1000_consume_page(buffer_info, skb, length); | 4134 | e1000_consume_page(buffer_info, skb, length); |
4140 | } else { | 4135 | } else { |
4141 | /* no chain, got EOP, this buf is the packet | 4136 | /* no chain, got EOP, this buf is the packet |
4142 | * copybreak to save the put_page/alloc_page */ | 4137 | * copybreak to save the put_page/alloc_page |
4138 | */ | ||
4143 | if (length <= copybreak && | 4139 | if (length <= copybreak && |
4144 | skb_tailroom(skb) >= length) { | 4140 | skb_tailroom(skb) >= length) { |
4145 | u8 *vaddr; | 4141 | u8 *vaddr; |
4146 | vaddr = kmap_atomic(buffer_info->page); | 4142 | vaddr = kmap_atomic(buffer_info->page); |
4147 | memcpy(skb_tail_pointer(skb), vaddr, length); | 4143 | memcpy(skb_tail_pointer(skb), vaddr, |
4144 | length); | ||
4148 | kunmap_atomic(vaddr); | 4145 | kunmap_atomic(vaddr); |
4149 | /* re-use the page, so don't erase | 4146 | /* re-use the page, so don't erase |
4150 | * buffer_info->page */ | 4147 | * buffer_info->page |
4148 | */ | ||
4151 | skb_put(skb, length); | 4149 | skb_put(skb, length); |
4152 | } else { | 4150 | } else { |
4153 | skb_fill_page_desc(skb, 0, | 4151 | skb_fill_page_desc(skb, 0, |
4154 | buffer_info->page, 0, | 4152 | buffer_info->page, 0, |
4155 | length); | 4153 | length); |
4156 | e1000_consume_page(buffer_info, skb, | 4154 | e1000_consume_page(buffer_info, skb, |
4157 | length); | 4155 | length); |
4158 | } | 4156 | } |
4159 | } | 4157 | } |
4160 | } | 4158 | } |
4161 | 4159 | ||
4162 | /* Receive Checksum Offload XXX recompute due to CRC strip? */ | 4160 | /* Receive Checksum Offload XXX recompute due to CRC strip? */ |
4163 | e1000_rx_checksum(adapter, | 4161 | e1000_rx_checksum(adapter, |
4164 | (u32)(status) | | 4162 | (u32)(status) | |
4165 | ((u32)(rx_desc->errors) << 24), | 4163 | ((u32)(rx_desc->errors) << 24), |
4166 | le16_to_cpu(rx_desc->csum), skb); | 4164 | le16_to_cpu(rx_desc->csum), skb); |
4167 | 4165 | ||
4168 | total_rx_bytes += (skb->len - 4); /* don't count FCS */ | 4166 | total_rx_bytes += (skb->len - 4); /* don't count FCS */ |
4169 | if (likely(!(netdev->features & NETIF_F_RXFCS))) | 4167 | if (likely(!(netdev->features & NETIF_F_RXFCS))) |
@@ -4205,8 +4203,7 @@ next_desc: | |||
4205 | return cleaned; | 4203 | return cleaned; |
4206 | } | 4204 | } |
4207 | 4205 | ||
4208 | /* | 4206 | /* this should improve performance for small packets with large amounts |
4209 | * this should improve performance for small packets with large amounts | ||
4210 | * of reassembly being done in the stack | 4207 | * of reassembly being done in the stack |
4211 | */ | 4208 | */ |
4212 | static void e1000_check_copybreak(struct net_device *netdev, | 4209 | static void e1000_check_copybreak(struct net_device *netdev, |
@@ -4310,9 +4307,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
4310 | last_byte)) { | 4307 | last_byte)) { |
4311 | spin_lock_irqsave(&adapter->stats_lock, flags); | 4308 | spin_lock_irqsave(&adapter->stats_lock, flags); |
4312 | e1000_tbi_adjust_stats(hw, &adapter->stats, | 4309 | e1000_tbi_adjust_stats(hw, &adapter->stats, |
4313 | length, skb->data); | 4310 | length, skb->data); |
4314 | spin_unlock_irqrestore(&adapter->stats_lock, | 4311 | spin_unlock_irqrestore(&adapter->stats_lock, |
4315 | flags); | 4312 | flags); |
4316 | length--; | 4313 | length--; |
4317 | } else { | 4314 | } else { |
4318 | if (netdev->features & NETIF_F_RXALL) | 4315 | if (netdev->features & NETIF_F_RXALL) |
@@ -4377,10 +4374,9 @@ next_desc: | |||
4377 | * @rx_ring: pointer to receive ring structure | 4374 | * @rx_ring: pointer to receive ring structure |
4378 | * @cleaned_count: number of buffers to allocate this pass | 4375 | * @cleaned_count: number of buffers to allocate this pass |
4379 | **/ | 4376 | **/ |
4380 | |||
4381 | static void | 4377 | static void |
4382 | e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, | 4378 | e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, |
4383 | struct e1000_rx_ring *rx_ring, int cleaned_count) | 4379 | struct e1000_rx_ring *rx_ring, int cleaned_count) |
4384 | { | 4380 | { |
4385 | struct net_device *netdev = adapter->netdev; | 4381 | struct net_device *netdev = adapter->netdev; |
4386 | struct pci_dev *pdev = adapter->pdev; | 4382 | struct pci_dev *pdev = adapter->pdev; |
@@ -4421,7 +4417,7 @@ check_page: | |||
4421 | 4417 | ||
4422 | if (!buffer_info->dma) { | 4418 | if (!buffer_info->dma) { |
4423 | buffer_info->dma = dma_map_page(&pdev->dev, | 4419 | buffer_info->dma = dma_map_page(&pdev->dev, |
4424 | buffer_info->page, 0, | 4420 | buffer_info->page, 0, |
4425 | buffer_info->length, | 4421 | buffer_info->length, |
4426 | DMA_FROM_DEVICE); | 4422 | DMA_FROM_DEVICE); |
4427 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | 4423 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { |
@@ -4451,7 +4447,8 @@ check_page: | |||
4451 | /* Force memory writes to complete before letting h/w | 4447 | /* Force memory writes to complete before letting h/w |
4452 | * know there are new descriptors to fetch. (Only | 4448 | * know there are new descriptors to fetch. (Only |
4453 | * applicable for weak-ordered memory model archs, | 4449 | * applicable for weak-ordered memory model archs, |
4454 | * such as IA-64). */ | 4450 | * such as IA-64). |
4451 | */ | ||
4455 | wmb(); | 4452 | wmb(); |
4456 | writel(i, adapter->hw.hw_addr + rx_ring->rdt); | 4453 | writel(i, adapter->hw.hw_addr + rx_ring->rdt); |
4457 | } | 4454 | } |
@@ -4461,7 +4458,6 @@ check_page: | |||
4461 | * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended | 4458 | * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended |
4462 | * @adapter: address of board private structure | 4459 | * @adapter: address of board private structure |
4463 | **/ | 4460 | **/ |
4464 | |||
4465 | static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | 4461 | static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, |
4466 | struct e1000_rx_ring *rx_ring, | 4462 | struct e1000_rx_ring *rx_ring, |
4467 | int cleaned_count) | 4463 | int cleaned_count) |
@@ -4532,8 +4528,7 @@ map_skb: | |||
4532 | break; /* while !buffer_info->skb */ | 4528 | break; /* while !buffer_info->skb */ |
4533 | } | 4529 | } |
4534 | 4530 | ||
4535 | /* | 4531 | /* XXX if it was allocated cleanly it will never map to a |
4536 | * XXX if it was allocated cleanly it will never map to a | ||
4537 | * boundary crossing | 4532 | * boundary crossing |
4538 | */ | 4533 | */ |
4539 | 4534 | ||
@@ -4571,7 +4566,8 @@ map_skb: | |||
4571 | /* Force memory writes to complete before letting h/w | 4566 | /* Force memory writes to complete before letting h/w |
4572 | * know there are new descriptors to fetch. (Only | 4567 | * know there are new descriptors to fetch. (Only |
4573 | * applicable for weak-ordered memory model archs, | 4568 | * applicable for weak-ordered memory model archs, |
4574 | * such as IA-64). */ | 4569 | * such as IA-64). |
4570 | */ | ||
4575 | wmb(); | 4571 | wmb(); |
4576 | writel(i, hw->hw_addr + rx_ring->rdt); | 4572 | writel(i, hw->hw_addr + rx_ring->rdt); |
4577 | } | 4573 | } |
@@ -4581,7 +4577,6 @@ map_skb: | |||
4581 | * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. | 4577 | * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. |
4582 | * @adapter: | 4578 | * @adapter: |
4583 | **/ | 4579 | **/ |
4584 | |||
4585 | static void e1000_smartspeed(struct e1000_adapter *adapter) | 4580 | static void e1000_smartspeed(struct e1000_adapter *adapter) |
4586 | { | 4581 | { |
4587 | struct e1000_hw *hw = &adapter->hw; | 4582 | struct e1000_hw *hw = &adapter->hw; |
@@ -4594,7 +4589,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter) | |||
4594 | 4589 | ||
4595 | if (adapter->smartspeed == 0) { | 4590 | if (adapter->smartspeed == 0) { |
4596 | /* If Master/Slave config fault is asserted twice, | 4591 | /* If Master/Slave config fault is asserted twice, |
4597 | * we assume back-to-back */ | 4592 | * we assume back-to-back |
4593 | */ | ||
4598 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); | 4594 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); |
4599 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; | 4595 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; |
4600 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); | 4596 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); |
@@ -4607,7 +4603,7 @@ static void e1000_smartspeed(struct e1000_adapter *adapter) | |||
4607 | adapter->smartspeed++; | 4603 | adapter->smartspeed++; |
4608 | if (!e1000_phy_setup_autoneg(hw) && | 4604 | if (!e1000_phy_setup_autoneg(hw) && |
4609 | !e1000_read_phy_reg(hw, PHY_CTRL, | 4605 | !e1000_read_phy_reg(hw, PHY_CTRL, |
4610 | &phy_ctrl)) { | 4606 | &phy_ctrl)) { |
4611 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | | 4607 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | |
4612 | MII_CR_RESTART_AUTO_NEG); | 4608 | MII_CR_RESTART_AUTO_NEG); |
4613 | e1000_write_phy_reg(hw, PHY_CTRL, | 4609 | e1000_write_phy_reg(hw, PHY_CTRL, |
@@ -4638,7 +4634,6 @@ static void e1000_smartspeed(struct e1000_adapter *adapter) | |||
4638 | * @ifreq: | 4634 | * @ifreq: |
4639 | * @cmd: | 4635 | * @cmd: |
4640 | **/ | 4636 | **/ |
4641 | |||
4642 | static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | 4637 | static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
4643 | { | 4638 | { |
4644 | switch (cmd) { | 4639 | switch (cmd) { |
@@ -4657,7 +4652,6 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4657 | * @ifreq: | 4652 | * @ifreq: |
4658 | * @cmd: | 4653 | * @cmd: |
4659 | **/ | 4654 | **/ |
4660 | |||
4661 | static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | 4655 | static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, |
4662 | int cmd) | 4656 | int cmd) |
4663 | { | 4657 | { |
@@ -4919,7 +4913,8 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) | |||
4919 | hw->autoneg = 0; | 4913 | hw->autoneg = 0; |
4920 | 4914 | ||
4921 | /* Make sure dplx is at most 1 bit and lsb of speed is not set | 4915 | /* Make sure dplx is at most 1 bit and lsb of speed is not set |
4922 | * for the switch() below to work */ | 4916 | * for the switch() below to work |
4917 | */ | ||
4923 | if ((spd & 1) || (dplx & ~1)) | 4918 | if ((spd & 1) || (dplx & ~1)) |
4924 | goto err_inval; | 4919 | goto err_inval; |
4925 | 4920 | ||
@@ -5122,8 +5117,7 @@ static void e1000_shutdown(struct pci_dev *pdev) | |||
5122 | } | 5117 | } |
5123 | 5118 | ||
5124 | #ifdef CONFIG_NET_POLL_CONTROLLER | 5119 | #ifdef CONFIG_NET_POLL_CONTROLLER |
5125 | /* | 5120 | /* Polling 'interrupt' - used by things like netconsole to send skbs |
5126 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
5127 | * without having to re-enable interrupts. It's not called while | 5121 | * without having to re-enable interrupts. It's not called while |
5128 | * the interrupt routine is executing. | 5122 | * the interrupt routine is executing. |
5129 | */ | 5123 | */ |
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c index 750fc0194f37..c9cde352b1c8 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_param.c +++ b/drivers/net/ethernet/intel/e1000/e1000_param.c | |||
@@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter); | |||
267 | * value exists, a default value is used. The final value is stored | 267 | * value exists, a default value is used. The final value is stored |
268 | * in a variable in the adapter structure. | 268 | * in a variable in the adapter structure. |
269 | **/ | 269 | **/ |
270 | |||
271 | void e1000_check_options(struct e1000_adapter *adapter) | 270 | void e1000_check_options(struct e1000_adapter *adapter) |
272 | { | 271 | { |
273 | struct e1000_option opt; | 272 | struct e1000_option opt; |
@@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter) | |||
319 | .def = E1000_DEFAULT_RXD, | 318 | .def = E1000_DEFAULT_RXD, |
320 | .arg = { .r = { | 319 | .arg = { .r = { |
321 | .min = E1000_MIN_RXD, | 320 | .min = E1000_MIN_RXD, |
322 | .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD | 321 | .max = mac_type < e1000_82544 ? E1000_MAX_RXD : |
322 | E1000_MAX_82544_RXD | ||
323 | }} | 323 | }} |
324 | }; | 324 | }; |
325 | 325 | ||
@@ -408,7 +408,7 @@ void e1000_check_options(struct e1000_adapter *adapter) | |||
408 | if (num_TxAbsIntDelay > bd) { | 408 | if (num_TxAbsIntDelay > bd) { |
409 | adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; | 409 | adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; |
410 | e1000_validate_option(&adapter->tx_abs_int_delay, &opt, | 410 | e1000_validate_option(&adapter->tx_abs_int_delay, &opt, |
411 | adapter); | 411 | adapter); |
412 | } else { | 412 | } else { |
413 | adapter->tx_abs_int_delay = opt.def; | 413 | adapter->tx_abs_int_delay = opt.def; |
414 | } | 414 | } |
@@ -426,7 +426,7 @@ void e1000_check_options(struct e1000_adapter *adapter) | |||
426 | if (num_RxIntDelay > bd) { | 426 | if (num_RxIntDelay > bd) { |
427 | adapter->rx_int_delay = RxIntDelay[bd]; | 427 | adapter->rx_int_delay = RxIntDelay[bd]; |
428 | e1000_validate_option(&adapter->rx_int_delay, &opt, | 428 | e1000_validate_option(&adapter->rx_int_delay, &opt, |
429 | adapter); | 429 | adapter); |
430 | } else { | 430 | } else { |
431 | adapter->rx_int_delay = opt.def; | 431 | adapter->rx_int_delay = opt.def; |
432 | } | 432 | } |
@@ -444,7 +444,7 @@ void e1000_check_options(struct e1000_adapter *adapter) | |||
444 | if (num_RxAbsIntDelay > bd) { | 444 | if (num_RxAbsIntDelay > bd) { |
445 | adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; | 445 | adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; |
446 | e1000_validate_option(&adapter->rx_abs_int_delay, &opt, | 446 | e1000_validate_option(&adapter->rx_abs_int_delay, &opt, |
447 | adapter); | 447 | adapter); |
448 | } else { | 448 | } else { |
449 | adapter->rx_abs_int_delay = opt.def; | 449 | adapter->rx_abs_int_delay = opt.def; |
450 | } | 450 | } |
@@ -479,16 +479,17 @@ void e1000_check_options(struct e1000_adapter *adapter) | |||
479 | break; | 479 | break; |
480 | case 4: | 480 | case 4: |
481 | e_dev_info("%s set to simplified " | 481 | e_dev_info("%s set to simplified " |
482 | "(2000-8000) ints mode\n", opt.name); | 482 | "(2000-8000) ints mode\n", opt.name); |
483 | adapter->itr_setting = adapter->itr; | 483 | adapter->itr_setting = adapter->itr; |
484 | break; | 484 | break; |
485 | default: | 485 | default: |
486 | e1000_validate_option(&adapter->itr, &opt, | 486 | e1000_validate_option(&adapter->itr, &opt, |
487 | adapter); | 487 | adapter); |
488 | /* save the setting, because the dynamic bits | 488 | /* save the setting, because the dynamic bits |
489 | * change itr. | 489 | * change itr. |
490 | * clear the lower two bits because they are | 490 | * clear the lower two bits because they are |
491 | * used as control */ | 491 | * used as control |
492 | */ | ||
492 | adapter->itr_setting = adapter->itr & ~3; | 493 | adapter->itr_setting = adapter->itr & ~3; |
493 | break; | 494 | break; |
494 | } | 495 | } |
@@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter) | |||
533 | * | 534 | * |
534 | * Handles speed and duplex options on fiber adapters | 535 | * Handles speed and duplex options on fiber adapters |
535 | **/ | 536 | **/ |
536 | |||
537 | static void e1000_check_fiber_options(struct e1000_adapter *adapter) | 537 | static void e1000_check_fiber_options(struct e1000_adapter *adapter) |
538 | { | 538 | { |
539 | int bd = adapter->bd_number; | 539 | int bd = adapter->bd_number; |
@@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter) | |||
559 | * | 559 | * |
560 | * Handles speed and duplex options on copper adapters | 560 | * Handles speed and duplex options on copper adapters |
561 | **/ | 561 | **/ |
562 | |||
563 | static void e1000_check_copper_options(struct e1000_adapter *adapter) | 562 | static void e1000_check_copper_options(struct e1000_adapter *adapter) |
564 | { | 563 | { |
565 | struct e1000_option opt; | 564 | struct e1000_option opt; |
@@ -681,22 +680,22 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter) | |||
681 | e_dev_info("Using Autonegotiation at Half Duplex only\n"); | 680 | e_dev_info("Using Autonegotiation at Half Duplex only\n"); |
682 | adapter->hw.autoneg = adapter->fc_autoneg = 1; | 681 | adapter->hw.autoneg = adapter->fc_autoneg = 1; |
683 | adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | | 682 | adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | |
684 | ADVERTISE_100_HALF; | 683 | ADVERTISE_100_HALF; |
685 | break; | 684 | break; |
686 | case FULL_DUPLEX: | 685 | case FULL_DUPLEX: |
687 | e_dev_info("Full Duplex specified without Speed\n"); | 686 | e_dev_info("Full Duplex specified without Speed\n"); |
688 | e_dev_info("Using Autonegotiation at Full Duplex only\n"); | 687 | e_dev_info("Using Autonegotiation at Full Duplex only\n"); |
689 | adapter->hw.autoneg = adapter->fc_autoneg = 1; | 688 | adapter->hw.autoneg = adapter->fc_autoneg = 1; |
690 | adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | | 689 | adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | |
691 | ADVERTISE_100_FULL | | 690 | ADVERTISE_100_FULL | |
692 | ADVERTISE_1000_FULL; | 691 | ADVERTISE_1000_FULL; |
693 | break; | 692 | break; |
694 | case SPEED_10: | 693 | case SPEED_10: |
695 | e_dev_info("10 Mbps Speed specified without Duplex\n"); | 694 | e_dev_info("10 Mbps Speed specified without Duplex\n"); |
696 | e_dev_info("Using Autonegotiation at 10 Mbps only\n"); | 695 | e_dev_info("Using Autonegotiation at 10 Mbps only\n"); |
697 | adapter->hw.autoneg = adapter->fc_autoneg = 1; | 696 | adapter->hw.autoneg = adapter->fc_autoneg = 1; |
698 | adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | | 697 | adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | |
699 | ADVERTISE_10_FULL; | 698 | ADVERTISE_10_FULL; |
700 | break; | 699 | break; |
701 | case SPEED_10 + HALF_DUPLEX: | 700 | case SPEED_10 + HALF_DUPLEX: |
702 | e_dev_info("Forcing to 10 Mbps Half Duplex\n"); | 701 | e_dev_info("Forcing to 10 Mbps Half Duplex\n"); |
@@ -715,7 +714,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter) | |||
715 | e_dev_info("Using Autonegotiation at 100 Mbps only\n"); | 714 | e_dev_info("Using Autonegotiation at 100 Mbps only\n"); |
716 | adapter->hw.autoneg = adapter->fc_autoneg = 1; | 715 | adapter->hw.autoneg = adapter->fc_autoneg = 1; |
717 | adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | | 716 | adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | |
718 | ADVERTISE_100_FULL; | 717 | ADVERTISE_100_FULL; |
719 | break; | 718 | break; |
720 | case SPEED_100 + HALF_DUPLEX: | 719 | case SPEED_100 + HALF_DUPLEX: |
721 | e_dev_info("Forcing to 100 Mbps Half Duplex\n"); | 720 | e_dev_info("Forcing to 100 Mbps Half Duplex\n"); |