author		Jesse Brandeburg <jesse.brandeburg@intel.com>	2008-09-11 22:59:59 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-09-24 18:54:59 -0400
commit		c44ade9ef8ffd73cb8b026065ade78bc0040f0b4 (patch)
tree		4e873bc57bccb30e23cec99ed06ec58ba7251e39 /drivers/net/ixgbe
parent		f08482766b7e3c0b2aaac4b68b30f33a91703aa3 (diff)
ixgbe: update to latest common code module
This is a massive update that includes infrastructure for further patches
in which we will add support for more PHY types and EEPROM types.
This code is shared as much as possible with other drivers, so it may
seem a little obtuse at times, but wherever possible we keep to Linux
style and methods.
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--	drivers/net/ixgbe/ixgbe.h	    2
-rw-r--r--	drivers/net/ixgbe/ixgbe_82598.c	  600
-rw-r--r--	drivers/net/ixgbe/ixgbe_common.c	  914
-rw-r--r--	drivers/net/ixgbe/ixgbe_common.h	   59
-rw-r--r--	drivers/net/ixgbe/ixgbe_ethtool.c	   20
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	  105
-rw-r--r--	drivers/net/ixgbe/ixgbe_phy.c	  241
-rw-r--r--	drivers/net/ixgbe/ixgbe_phy.h	   60
-rw-r--r--	drivers/net/ixgbe/ixgbe_type.h	  513
9 files changed, 1700 insertions(+), 814 deletions(-)
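
The thrust of the restructuring in the diff below is that shared ("common") code no longer calls chip-specific helpers directly; each MAC, EEPROM and PHY instead publishes a table of function pointers (mac_ops_82598, eeprom_ops_82598 and phy_ops_82598 further down) that the generic code dispatches through. A minimal, self-contained sketch of that dispatch pattern follows; the types and names here are simplified stand-ins, not the driver's actual definitions:

```c
#include <stdio.h>

typedef int s32;                     /* stand-in for the kernel's s32 in this sketch */

struct ixgbe_hw;                     /* forward declaration */

/* Simplified stand-in for struct ixgbe_mac_operations from ixgbe_type.h */
struct mac_ops_sketch {
	s32 (*reset_hw)(struct ixgbe_hw *hw);   /* chip-specific (82598) entry */
	s32 (*start_hw)(struct ixgbe_hw *hw);   /* shared "generic" entry */
};

struct ixgbe_hw {
	struct mac_ops_sketch mac_ops;
};

/* Chip-specific implementation, analogous to ixgbe_reset_hw_82598() */
static s32 reset_hw_82598(struct ixgbe_hw *hw) { (void)hw; puts("82598 reset"); return 0; }

/* Shared implementation, analogous to ixgbe_start_hw_generic() */
static s32 start_hw_generic(struct ixgbe_hw *hw) { (void)hw; puts("generic start"); return 0; }

/* Generic init dispatches through the table, as ixgbe_init_hw_generic() does below */
static s32 init_hw_generic(struct ixgbe_hw *hw)
{
	s32 status = hw->mac_ops.reset_hw(hw);
	if (status)
		return status;
	return hw->mac_ops.start_hw(hw);
}

int main(void)
{
	struct ixgbe_hw hw = {
		.mac_ops = { .reset_hw = reset_hw_82598, .start_hw = start_hw_generic },
	};
	return (int)init_hw_generic(&hw);
}
```

In the real driver the per-device struct ixgbe_info (ixgbe_82598_info at the end of the ixgbe_82598.c diff) supplies these tables together with a get_invariants routine, which the probe path is expected to copy into struct ixgbe_hw before the generic init runs.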
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 27db64f5c860..2388af246720 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -41,8 +41,6 @@
 #include <linux/dca.h>
 #endif
 
-#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args)
-
 #define PFX "ixgbe: "
 #define DPRINTK(nlevel, klevel, fmt, args...) \
 	((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 07261406cd63..a08a267f1667 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -39,68 +39,80 @@ | |||
39 | #define IXGBE_82598_MC_TBL_SIZE 128 | 39 | #define IXGBE_82598_MC_TBL_SIZE 128 |
40 | #define IXGBE_82598_VFT_TBL_SIZE 128 | 40 | #define IXGBE_82598_VFT_TBL_SIZE 128 |
41 | 41 | ||
42 | static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw); | 42 | static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, |
43 | static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, | 43 | ixgbe_link_speed *speed, |
44 | bool *autoneg); | 44 | bool *autoneg); |
45 | static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, | ||
46 | u32 *speed, bool *autoneg); | ||
47 | static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); | 45 | static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); |
46 | static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num); | ||
48 | static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw); | 47 | static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw); |
49 | static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, | 48 | static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, |
50 | bool *link_up, | 49 | ixgbe_link_speed *speed, |
51 | bool link_up_wait_to_complete); | 50 | bool *link_up, bool link_up_wait_to_complete); |
52 | static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, u32 speed, | 51 | static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, |
53 | bool autoneg, | 52 | ixgbe_link_speed speed, |
54 | bool autoneg_wait_to_complete); | 53 | bool autoneg, |
54 | bool autoneg_wait_to_complete); | ||
55 | static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw); | 55 | static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw); |
56 | static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, | 56 | static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, |
57 | bool autoneg, | 57 | ixgbe_link_speed speed, |
58 | bool autoneg_wait_to_complete); | 58 | bool autoneg, |
59 | bool autoneg_wait_to_complete); | ||
59 | static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); | 60 | static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); |
61 | static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); | ||
62 | static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); | ||
63 | static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, | ||
64 | u32 vind, bool vlan_on); | ||
65 | static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); | ||
66 | static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index); | ||
67 | static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index); | ||
68 | static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val); | ||
69 | static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val); | ||
70 | static s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw); | ||
71 | static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw); | ||
60 | 72 | ||
61 | 73 | /** | |
74 | */ | ||
62 | static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) | 75 | static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) |
63 | { | 76 | { |
64 | hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES; | 77 | struct ixgbe_mac_info *mac = &hw->mac; |
65 | hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES; | 78 | struct ixgbe_phy_info *phy = &hw->phy; |
66 | hw->mac.mcft_size = IXGBE_82598_MC_TBL_SIZE; | 79 | |
67 | hw->mac.vft_size = IXGBE_82598_VFT_TBL_SIZE; | 80 | /* Call PHY identify routine to get the phy type */ |
68 | hw->mac.num_rar_entries = IXGBE_82598_RAR_ENTRIES; | 81 | ixgbe_identify_phy_generic(hw); |
69 | 82 | ||
70 | /* PHY ops are filled in by default properly for Fiber only */ | 83 | /* PHY Init */ |
71 | if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { | 84 | switch (phy->type) { |
72 | hw->mac.ops.setup_link = &ixgbe_setup_copper_link_82598; | 85 | default: |
73 | hw->mac.ops.setup_link_speed = &ixgbe_setup_copper_link_speed_82598; | 86 | break; |
74 | hw->mac.ops.get_link_settings = | 87 | } |
75 | &ixgbe_get_copper_link_settings_82598; | 88 | |
76 | 89 | if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { | |
77 | /* Call PHY identify routine to get the phy type */ | 90 | mac->ops.setup_link = &ixgbe_setup_copper_link_82598; |
78 | ixgbe_identify_phy(hw); | 91 | mac->ops.setup_link_speed = |
79 | 92 | &ixgbe_setup_copper_link_speed_82598; | |
80 | switch (hw->phy.type) { | 93 | mac->ops.get_link_capabilities = |
81 | case ixgbe_phy_tn: | 94 | &ixgbe_get_copper_link_capabilities_82598; |
82 | hw->phy.ops.setup_link = &ixgbe_setup_tnx_phy_link; | ||
83 | hw->phy.ops.check_link = &ixgbe_check_tnx_phy_link; | ||
84 | hw->phy.ops.setup_link_speed = | ||
85 | &ixgbe_setup_tnx_phy_link_speed; | ||
86 | break; | ||
87 | default: | ||
88 | break; | ||
89 | } | ||
90 | } | 95 | } |
91 | 96 | ||
97 | mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; | ||
98 | mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; | ||
99 | mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; | ||
100 | mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; | ||
101 | mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; | ||
102 | |||
92 | return 0; | 103 | return 0; |
93 | } | 104 | } |
94 | 105 | ||
95 | /** | 106 | /** |
96 | * ixgbe_get_link_settings_82598 - Determines default link settings | 107 | * ixgbe_get_link_capabilities_82598 - Determines link capabilities |
97 | * @hw: pointer to hardware structure | 108 | * @hw: pointer to hardware structure |
98 | * @speed: pointer to link speed | 109 | * @speed: pointer to link speed |
99 | * @autoneg: boolean auto-negotiation value | 110 | * @autoneg: boolean auto-negotiation value |
100 | * | 111 | * |
101 | * Determines the default link settings by reading the AUTOC register. | 112 | * Determines the link capabilities by reading the AUTOC register. |
102 | **/ | 113 | **/ |
103 | static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, | 114 | static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, |
115 | ixgbe_link_speed *speed, | ||
104 | bool *autoneg) | 116 | bool *autoneg) |
105 | { | 117 | { |
106 | s32 status = 0; | 118 | s32 status = 0; |
@@ -150,15 +162,16 @@ static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, | |||
150 | } | 162 | } |
151 | 163 | ||
152 | /** | 164 | /** |
153 | * ixgbe_get_copper_link_settings_82598 - Determines default link settings | 165 | * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities |
154 | * @hw: pointer to hardware structure | 166 | * @hw: pointer to hardware structure |
155 | * @speed: pointer to link speed | 167 | * @speed: pointer to link speed |
156 | * @autoneg: boolean auto-negotiation value | 168 | * @autoneg: boolean auto-negotiation value |
157 | * | 169 | * |
158 | * Determines the default link settings by reading the AUTOC register. | 170 | * Determines the link capabilities by reading the AUTOC register. |
159 | **/ | 171 | **/ |
160 | static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, | 172 | s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, |
161 | u32 *speed, bool *autoneg) | 173 | ixgbe_link_speed *speed, |
174 | bool *autoneg) | ||
162 | { | 175 | { |
163 | s32 status = IXGBE_ERR_LINK_SETUP; | 176 | s32 status = IXGBE_ERR_LINK_SETUP; |
164 | u16 speed_ability; | 177 | u16 speed_ability; |
@@ -166,7 +179,7 @@ static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, | |||
166 | *speed = 0; | 179 | *speed = 0; |
167 | *autoneg = true; | 180 | *autoneg = true; |
168 | 181 | ||
169 | status = ixgbe_read_phy_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, | 182 | status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, |
170 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, | 183 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, |
171 | &speed_ability); | 184 | &speed_ability); |
172 | 185 | ||
@@ -199,9 +212,6 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
 	case IXGBE_DEV_ID_82598EB_XF_LR:
 		media_type = ixgbe_media_type_fiber;
 		break;
-	case IXGBE_DEV_ID_82598AT_DUAL_PORT:
-		media_type = ixgbe_media_type_copper;
-		break;
 	default:
 		media_type = ixgbe_media_type_unknown;
 		break;
@@ -211,6 +221,122 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) | |||
211 | } | 221 | } |
212 | 222 | ||
213 | /** | 223 | /** |
224 | * ixgbe_setup_fc_82598 - Configure flow control settings | ||
225 | * @hw: pointer to hardware structure | ||
226 | * @packetbuf_num: packet buffer number (0-7) | ||
227 | * | ||
228 | * Configures the flow control settings based on SW configuration. This | ||
229 | * function is used for 802.3x flow control configuration only. | ||
230 | **/ | ||
231 | s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num) | ||
232 | { | ||
233 | u32 frctl_reg; | ||
234 | u32 rmcs_reg; | ||
235 | |||
236 | if (packetbuf_num < 0 || packetbuf_num > 7) { | ||
237 | hw_dbg(hw, "Invalid packet buffer number [%d], expected range is" | ||
238 | " 0-7\n", packetbuf_num); | ||
239 | } | ||
240 | |||
241 | frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); | ||
242 | frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); | ||
243 | |||
244 | rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); | ||
245 | rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); | ||
246 | |||
247 | /* | ||
248 | * 10 gig parts do not have a word in the EEPROM to determine the | ||
249 | * default flow control setting, so we explicitly set it to full. | ||
250 | */ | ||
251 | if (hw->fc.type == ixgbe_fc_default) | ||
252 | hw->fc.type = ixgbe_fc_full; | ||
253 | |||
254 | /* | ||
255 | * We want to save off the original Flow Control configuration just in | ||
256 | * case we get disconnected and then reconnected into a different hub | ||
257 | * or switch with different Flow Control capabilities. | ||
258 | */ | ||
259 | hw->fc.original_type = hw->fc.type; | ||
260 | |||
261 | /* | ||
262 | * The possible values of the "flow_control" parameter are: | ||
263 | * 0: Flow control is completely disabled | ||
264 | * 1: Rx flow control is enabled (we can receive pause frames but not | ||
265 | * send pause frames). | ||
266 | * 2: Tx flow control is enabled (we can send pause frames but we do not | ||
267 | * support receiving pause frames) | ||
268 | * 3: Both Rx and Tx flow control (symmetric) are enabled. | ||
269 | * other: Invalid. | ||
270 | */ | ||
271 | switch (hw->fc.type) { | ||
272 | case ixgbe_fc_none: | ||
273 | break; | ||
274 | case ixgbe_fc_rx_pause: | ||
275 | /* | ||
276 | * Rx Flow control is enabled, | ||
277 | * and Tx Flow control is disabled. | ||
278 | */ | ||
279 | frctl_reg |= IXGBE_FCTRL_RFCE; | ||
280 | break; | ||
281 | case ixgbe_fc_tx_pause: | ||
282 | /* | ||
283 | * Tx Flow control is enabled, and Rx Flow control is disabled, | ||
284 | * by a software over-ride. | ||
285 | */ | ||
286 | rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; | ||
287 | break; | ||
288 | case ixgbe_fc_full: | ||
289 | /* | ||
290 | * Flow control (both Rx and Tx) is enabled by a software | ||
291 | * over-ride. | ||
292 | */ | ||
293 | frctl_reg |= IXGBE_FCTRL_RFCE; | ||
294 | rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; | ||
295 | break; | ||
296 | default: | ||
297 | /* We should never get here. The value should be 0-3. */ | ||
298 | hw_dbg(hw, "Flow control param set incorrectly\n"); | ||
299 | break; | ||
300 | } | ||
301 | |||
302 | /* Enable 802.3x based flow control settings. */ | ||
303 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); | ||
304 | IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); | ||
305 | |||
306 | /* | ||
307 | * Check for invalid software configuration, zeros are completely | ||
308 | * invalid for all parameters used past this point, and if we enable | ||
309 | * flow control with zero water marks, we blast flow control packets. | ||
310 | */ | ||
311 | if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { | ||
312 | hw_dbg(hw, "Flow control structure initialized incorrectly\n"); | ||
313 | return IXGBE_ERR_INVALID_LINK_SETTINGS; | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * We need to set up the Receive Threshold high and low water | ||
318 | * marks as well as (optionally) enabling the transmission of | ||
319 | * XON frames. | ||
320 | */ | ||
321 | if (hw->fc.type & ixgbe_fc_tx_pause) { | ||
322 | if (hw->fc.send_xon) { | ||
323 | IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), | ||
324 | (hw->fc.low_water | IXGBE_FCRTL_XONE)); | ||
325 | } else { | ||
326 | IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), | ||
327 | hw->fc.low_water); | ||
328 | } | ||
329 | IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), | ||
330 | (hw->fc.high_water)|IXGBE_FCRTH_FCEN); | ||
331 | } | ||
332 | |||
333 | IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time); | ||
334 | IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); | ||
335 | |||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | /** | ||
214 | * ixgbe_setup_mac_link_82598 - Configures MAC link settings | 340 | * ixgbe_setup_mac_link_82598 - Configures MAC link settings |
215 | * @hw: pointer to hardware structure | 341 | * @hw: pointer to hardware structure |
216 | * | 342 | * |
@@ -254,8 +380,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) | |||
254 | } | 380 | } |
255 | if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { | 381 | if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { |
256 | status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; | 382 | status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; |
257 | hw_dbg(hw, | 383 | hw_dbg(hw, "Autonegotiation did not complete.\n"); |
258 | "Autonegotiation did not complete.\n"); | ||
259 | } | 384 | } |
260 | } | 385 | } |
261 | } | 386 | } |
@@ -265,8 +390,8 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
 	 * case we get disconnected and then reconnected into a different hub
 	 * or switch with different Flow Control capabilities.
 	 */
-	hw->fc.type = hw->fc.original_type;
-	ixgbe_setup_fc(hw, 0);
+	hw->fc.original_type = hw->fc.type;
+	ixgbe_setup_fc_82598(hw, 0);
 
 	/* Add delay to filter out noises during initial link setup */
 	msleep(50);
@@ -283,15 +408,13 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) | |||
283 | * | 408 | * |
284 | * Reads the links register to determine if link is up and the current speed | 409 | * Reads the links register to determine if link is up and the current speed |
285 | **/ | 410 | **/ |
286 | static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, | 411 | static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, |
287 | bool *link_up, | 412 | bool *link_up, bool link_up_wait_to_complete) |
288 | bool link_up_wait_to_complete) | ||
289 | { | 413 | { |
290 | u32 links_reg; | 414 | u32 links_reg; |
291 | u32 i; | 415 | u32 i; |
292 | 416 | ||
293 | links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); | 417 | links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); |
294 | |||
295 | if (link_up_wait_to_complete) { | 418 | if (link_up_wait_to_complete) { |
296 | for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { | 419 | for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { |
297 | if (links_reg & IXGBE_LINKS_UP) { | 420 | if (links_reg & IXGBE_LINKS_UP) { |
@@ -318,6 +441,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, | |||
318 | return 0; | 441 | return 0; |
319 | } | 442 | } |
320 | 443 | ||
444 | |||
321 | /** | 445 | /** |
322 | * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed | 446 | * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed |
323 | * @hw: pointer to hardware structure | 447 | * @hw: pointer to hardware structure |
@@ -328,18 +452,18 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, | |||
328 | * Set the link speed in the AUTOC register and restarts link. | 452 | * Set the link speed in the AUTOC register and restarts link. |
329 | **/ | 453 | **/ |
330 | static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, | 454 | static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, |
331 | u32 speed, bool autoneg, | 455 | ixgbe_link_speed speed, bool autoneg, |
332 | bool autoneg_wait_to_complete) | 456 | bool autoneg_wait_to_complete) |
333 | { | 457 | { |
334 | s32 status = 0; | 458 | s32 status = 0; |
335 | 459 | ||
336 | /* If speed is 10G, then check for CX4 or XAUI. */ | 460 | /* If speed is 10G, then check for CX4 or XAUI. */ |
337 | if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && | 461 | if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && |
338 | (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) | 462 | (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) { |
339 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; | 463 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; |
340 | else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) | 464 | } else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) { |
341 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN; | 465 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN; |
342 | else if (autoneg) { | 466 | } else if (autoneg) { |
343 | /* BX mode - Autonegotiate 1G */ | 467 | /* BX mode - Autonegotiate 1G */ |
344 | if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD)) | 468 | if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD)) |
345 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN; | 469 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN; |
@@ -358,7 +482,7 @@ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, | |||
358 | * ixgbe_hw This will write the AUTOC register based on the new | 482 | * ixgbe_hw This will write the AUTOC register based on the new |
359 | * stored values | 483 | * stored values |
360 | */ | 484 | */ |
361 | hw->mac.ops.setup_link(hw); | 485 | ixgbe_setup_mac_link_82598(hw); |
362 | } | 486 | } |
363 | 487 | ||
364 | return status; | 488 | return status; |
@@ -376,18 +500,17 @@ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, | |||
376 | **/ | 500 | **/ |
377 | static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw) | 501 | static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw) |
378 | { | 502 | { |
379 | s32 status = 0; | 503 | s32 status; |
380 | 504 | ||
381 | /* Restart autonegotiation on PHY */ | 505 | /* Restart autonegotiation on PHY */ |
382 | if (hw->phy.ops.setup_link) | 506 | status = hw->phy.ops.setup_link(hw); |
383 | status = hw->phy.ops.setup_link(hw); | ||
384 | 507 | ||
385 | /* Set MAC to KX/KX4 autoneg, which defaultis to Parallel detection */ | 508 | /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */ |
386 | hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); | 509 | hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); |
387 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; | 510 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; |
388 | 511 | ||
389 | /* Set up MAC */ | 512 | /* Set up MAC */ |
390 | hw->mac.ops.setup_link(hw); | 513 | ixgbe_setup_mac_link_82598(hw); |
391 | 514 | ||
392 | return status; | 515 | return status; |
393 | } | 516 | } |
@@ -401,14 +524,14 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw) | |||
401 | * | 524 | * |
402 | * Sets the link speed in the AUTOC register in the MAC and restarts link. | 525 | * Sets the link speed in the AUTOC register in the MAC and restarts link. |
403 | **/ | 526 | **/ |
404 | static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, | 527 | static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, |
528 | ixgbe_link_speed speed, | ||
405 | bool autoneg, | 529 | bool autoneg, |
406 | bool autoneg_wait_to_complete) | 530 | bool autoneg_wait_to_complete) |
407 | { | 531 | { |
408 | s32 status = 0; | 532 | s32 status; |
409 | 533 | ||
410 | /* Setup the PHY according to input speed */ | 534 | /* Setup the PHY according to input speed */ |
411 | if (hw->phy.ops.setup_link_speed) | ||
412 | status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, | 535 | status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, |
413 | autoneg_wait_to_complete); | 536 | autoneg_wait_to_complete); |
414 | 537 | ||
@@ -417,7 +540,7 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, | |||
417 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; | 540 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; |
418 | 541 | ||
419 | /* Set up MAC */ | 542 | /* Set up MAC */ |
420 | hw->mac.ops.setup_link(hw); | 543 | ixgbe_setup_mac_link_82598(hw); |
421 | 544 | ||
422 | return status; | 545 | return status; |
423 | } | 546 | } |
@@ -426,7 +549,7 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
  * ixgbe_reset_hw_82598 - Performs hardware reset
  * @hw: pointer to hardware structure
  *
- * Resets the hardware by reseting the transmit and receive units, masks and
+ * Resets the hardware by resetting the transmit and receive units, masks and
  * clears all interrupts, performing a PHY reset, and performing a link (MAC)
  * reset.
  **/
@@ -440,35 +563,44 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) | |||
440 | u8 analog_val; | 563 | u8 analog_val; |
441 | 564 | ||
442 | /* Call adapter stop to disable tx/rx and clear interrupts */ | 565 | /* Call adapter stop to disable tx/rx and clear interrupts */ |
443 | ixgbe_stop_adapter(hw); | 566 | hw->mac.ops.stop_adapter(hw); |
444 | 567 | ||
445 | /* | 568 | /* |
446 | * Power up the Atlas TX lanes if they are currently powered down. | 569 | * Power up the Atlas Tx lanes if they are currently powered down. |
447 | * Atlas TX lanes are powered down for MAC loopback tests, but | 570 | * Atlas Tx lanes are powered down for MAC loopback tests, but |
448 | * they are not automatically restored on reset. | 571 | * they are not automatically restored on reset. |
449 | */ | 572 | */ |
450 | ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); | 573 | hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); |
451 | if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { | 574 | if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { |
452 | /* Enable TX Atlas so packets can be transmitted again */ | 575 | /* Enable Tx Atlas so packets can be transmitted again */ |
453 | ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); | 576 | hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, |
577 | &analog_val); | ||
454 | analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; | 578 | analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; |
455 | ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val); | 579 | hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, |
580 | analog_val); | ||
456 | 581 | ||
457 | ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val); | 582 | hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, |
583 | &analog_val); | ||
458 | analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; | 584 | analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; |
459 | ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val); | 585 | hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, |
586 | analog_val); | ||
460 | 587 | ||
461 | ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val); | 588 | hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, |
589 | &analog_val); | ||
462 | analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; | 590 | analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; |
463 | ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val); | 591 | hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, |
592 | analog_val); | ||
464 | 593 | ||
465 | ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val); | 594 | hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, |
595 | &analog_val); | ||
466 | analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; | 596 | analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; |
467 | ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val); | 597 | hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, |
598 | analog_val); | ||
468 | } | 599 | } |
469 | 600 | ||
470 | /* Reset PHY */ | 601 | /* Reset PHY */ |
471 | ixgbe_reset_phy(hw); | 602 | if (hw->phy.reset_disable == false) |
603 | hw->phy.ops.reset(hw); | ||
472 | 604 | ||
473 | /* | 605 | /* |
474 | * Prevent the PCI-E bus from from hanging by disabling PCI-E master | 606 | * Prevent the PCI-E bus from from hanging by disabling PCI-E master |
@@ -527,23 +659,305 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) | |||
527 | } | 659 | } |
528 | 660 | ||
529 | /* Store the permanent mac address */ | 661 | /* Store the permanent mac address */ |
530 | ixgbe_get_mac_addr(hw, hw->mac.perm_addr); | 662 | hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); |
531 | 663 | ||
532 | return status; | 664 | return status; |
533 | } | 665 | } |
534 | 666 | ||
667 | /** | ||
668 | * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address | ||
669 | * @hw: pointer to hardware struct | ||
670 | * @rar: receive address register index to associate with a VMDq index | ||
671 | * @vmdq: VMDq set index | ||
672 | **/ | ||
673 | s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) | ||
674 | { | ||
675 | u32 rar_high; | ||
676 | |||
677 | rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); | ||
678 | rar_high &= ~IXGBE_RAH_VIND_MASK; | ||
679 | rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); | ||
680 | IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); | ||
681 | return 0; | ||
682 | } | ||
683 | |||
684 | /** | ||
685 | * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address | ||
686 | * @hw: pointer to hardware struct | ||
687 | * @rar: receive address register index to associate with a VMDq index | ||
688 | * @vmdq: VMDq clear index (not used in 82598, but elsewhere) | ||
689 | **/ | ||
690 | static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) | ||
691 | { | ||
692 | u32 rar_high; | ||
693 | u32 rar_entries = hw->mac.num_rar_entries; | ||
694 | |||
695 | if (rar < rar_entries) { | ||
696 | rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); | ||
697 | if (rar_high & IXGBE_RAH_VIND_MASK) { | ||
698 | rar_high &= ~IXGBE_RAH_VIND_MASK; | ||
699 | IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); | ||
700 | } | ||
701 | } else { | ||
702 | hw_dbg(hw, "RAR index %d is out of range.\n", rar); | ||
703 | } | ||
704 | |||
705 | return 0; | ||
706 | } | ||
707 | |||
708 | /** | ||
709 | * ixgbe_set_vfta_82598 - Set VLAN filter table | ||
710 | * @hw: pointer to hardware structure | ||
711 | * @vlan: VLAN id to write to VLAN filter | ||
712 | * @vind: VMDq output index that maps queue to VLAN id in VFTA | ||
713 | * @vlan_on: boolean flag to turn on/off VLAN in VFTA | ||
714 | * | ||
715 | * Turn on/off specified VLAN in the VLAN filter table. | ||
716 | **/ | ||
717 | s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, | ||
718 | bool vlan_on) | ||
719 | { | ||
720 | u32 regindex; | ||
721 | u32 bitindex; | ||
722 | u32 bits; | ||
723 | u32 vftabyte; | ||
724 | |||
725 | if (vlan > 4095) | ||
726 | return IXGBE_ERR_PARAM; | ||
727 | |||
728 | /* Determine 32-bit word position in array */ | ||
729 | regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ | ||
730 | |||
731 | /* Determine the location of the (VMD) queue index */ | ||
732 | vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ | ||
733 | bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ | ||
734 | |||
735 | /* Set the nibble for VMD queue index */ | ||
736 | bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); | ||
737 | bits &= (~(0x0F << bitindex)); | ||
738 | bits |= (vind << bitindex); | ||
739 | IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); | ||
740 | |||
741 | /* Determine the location of the bit for this VLAN id */ | ||
742 | bitindex = vlan & 0x1F; /* lower five bits */ | ||
743 | |||
744 | bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); | ||
745 | if (vlan_on) | ||
746 | /* Turn on this VLAN id */ | ||
747 | bits |= (1 << bitindex); | ||
748 | else | ||
749 | /* Turn off this VLAN id */ | ||
750 | bits &= ~(1 << bitindex); | ||
751 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); | ||
752 | |||
753 | return 0; | ||
754 | } | ||
755 | |||
756 | /** | ||
757 | * ixgbe_clear_vfta_82598 - Clear VLAN filter table | ||
758 | * @hw: pointer to hardware structure | ||
759 | * | ||
760 | * Clears the VLAN filer table, and the VMDq index associated with the filter | ||
761 | **/ | ||
762 | static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) | ||
763 | { | ||
764 | u32 offset; | ||
765 | u32 vlanbyte; | ||
766 | |||
767 | for (offset = 0; offset < hw->mac.vft_size; offset++) | ||
768 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); | ||
769 | |||
770 | for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) | ||
771 | for (offset = 0; offset < hw->mac.vft_size; offset++) | ||
772 | IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), | ||
773 | 0); | ||
774 | |||
775 | return 0; | ||
776 | } | ||
777 | |||
778 | /** | ||
779 | * ixgbe_blink_led_start_82598 - Blink LED based on index. | ||
780 | * @hw: pointer to hardware structure | ||
781 | * @index: led number to blink | ||
782 | **/ | ||
783 | static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index) | ||
784 | { | ||
785 | ixgbe_link_speed speed = 0; | ||
786 | bool link_up = 0; | ||
787 | u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); | ||
788 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | ||
789 | |||
790 | /* | ||
791 | * Link must be up to auto-blink the LEDs on the 82598EB MAC; | ||
792 | * force it if link is down. | ||
793 | */ | ||
794 | hw->mac.ops.check_link(hw, &speed, &link_up, false); | ||
795 | |||
796 | if (!link_up) { | ||
797 | autoc_reg |= IXGBE_AUTOC_FLU; | ||
798 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); | ||
799 | msleep(10); | ||
800 | } | ||
801 | |||
802 | led_reg &= ~IXGBE_LED_MODE_MASK(index); | ||
803 | led_reg |= IXGBE_LED_BLINK(index); | ||
804 | IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); | ||
805 | IXGBE_WRITE_FLUSH(hw); | ||
806 | |||
807 | return 0; | ||
808 | } | ||
809 | |||
810 | /** | ||
811 | * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index. | ||
812 | * @hw: pointer to hardware structure | ||
813 | * @index: led number to stop blinking | ||
814 | **/ | ||
815 | static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index) | ||
816 | { | ||
817 | u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); | ||
818 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | ||
819 | |||
820 | autoc_reg &= ~IXGBE_AUTOC_FLU; | ||
821 | autoc_reg |= IXGBE_AUTOC_AN_RESTART; | ||
822 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); | ||
823 | |||
824 | led_reg &= ~IXGBE_LED_MODE_MASK(index); | ||
825 | led_reg &= ~IXGBE_LED_BLINK(index); | ||
826 | led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); | ||
827 | IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); | ||
828 | IXGBE_WRITE_FLUSH(hw); | ||
829 | |||
830 | return 0; | ||
831 | } | ||
832 | |||
833 | /** | ||
834 | * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register | ||
835 | * @hw: pointer to hardware structure | ||
836 | * @reg: analog register to read | ||
837 | * @val: read value | ||
838 | * | ||
839 | * Performs read operation to Atlas analog register specified. | ||
840 | **/ | ||
841 | s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) | ||
842 | { | ||
843 | u32 atlas_ctl; | ||
844 | |||
845 | IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, | ||
846 | IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); | ||
847 | IXGBE_WRITE_FLUSH(hw); | ||
848 | udelay(10); | ||
849 | atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); | ||
850 | *val = (u8)atlas_ctl; | ||
851 | |||
852 | return 0; | ||
853 | } | ||
854 | |||
855 | /** | ||
856 | * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register | ||
857 | * @hw: pointer to hardware structure | ||
858 | * @reg: atlas register to write | ||
859 | * @val: value to write | ||
860 | * | ||
861 | * Performs write operation to Atlas analog register specified. | ||
862 | **/ | ||
863 | s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) | ||
864 | { | ||
865 | u32 atlas_ctl; | ||
866 | |||
867 | atlas_ctl = (reg << 8) | val; | ||
868 | IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); | ||
869 | IXGBE_WRITE_FLUSH(hw); | ||
870 | udelay(10); | ||
871 | |||
872 | return 0; | ||
873 | } | ||
874 | |||
875 | /** | ||
876 | * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type | ||
877 | * @hw: pointer to hardware structure | ||
878 | * | ||
879 | * Determines physical layer capabilities of the current configuration. | ||
880 | **/ | ||
881 | s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) | ||
882 | { | ||
883 | s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; | ||
884 | |||
885 | switch (hw->device_id) { | ||
886 | case IXGBE_DEV_ID_82598EB_CX4: | ||
887 | case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: | ||
888 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; | ||
889 | break; | ||
890 | case IXGBE_DEV_ID_82598AF_DUAL_PORT: | ||
891 | case IXGBE_DEV_ID_82598AF_SINGLE_PORT: | ||
892 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; | ||
893 | break; | ||
894 | case IXGBE_DEV_ID_82598EB_XF_LR: | ||
895 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; | ||
896 | break; | ||
897 | |||
898 | default: | ||
899 | physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; | ||
900 | break; | ||
901 | } | ||
902 | |||
903 | return physical_layer; | ||
904 | } | ||
905 | |||
535 | static struct ixgbe_mac_operations mac_ops_82598 = { | 906 | static struct ixgbe_mac_operations mac_ops_82598 = { |
536 | .reset = &ixgbe_reset_hw_82598, | 907 | .init_hw = &ixgbe_init_hw_generic, |
908 | .reset_hw = &ixgbe_reset_hw_82598, | ||
909 | .start_hw = &ixgbe_start_hw_generic, | ||
910 | .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, | ||
537 | .get_media_type = &ixgbe_get_media_type_82598, | 911 | .get_media_type = &ixgbe_get_media_type_82598, |
912 | .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, | ||
913 | .get_mac_addr = &ixgbe_get_mac_addr_generic, | ||
914 | .stop_adapter = &ixgbe_stop_adapter_generic, | ||
915 | .read_analog_reg8 = &ixgbe_read_analog_reg8_82598, | ||
916 | .write_analog_reg8 = &ixgbe_write_analog_reg8_82598, | ||
538 | .setup_link = &ixgbe_setup_mac_link_82598, | 917 | .setup_link = &ixgbe_setup_mac_link_82598, |
539 | .check_link = &ixgbe_check_mac_link_82598, | ||
540 | .setup_link_speed = &ixgbe_setup_mac_link_speed_82598, | 918 | .setup_link_speed = &ixgbe_setup_mac_link_speed_82598, |
541 | .get_link_settings = &ixgbe_get_link_settings_82598, | 919 | .check_link = &ixgbe_check_mac_link_82598, |
920 | .get_link_capabilities = &ixgbe_get_link_capabilities_82598, | ||
921 | .led_on = &ixgbe_led_on_generic, | ||
922 | .led_off = &ixgbe_led_off_generic, | ||
923 | .blink_led_start = &ixgbe_blink_led_start_82598, | ||
924 | .blink_led_stop = &ixgbe_blink_led_stop_82598, | ||
925 | .set_rar = &ixgbe_set_rar_generic, | ||
926 | .clear_rar = &ixgbe_clear_rar_generic, | ||
927 | .set_vmdq = &ixgbe_set_vmdq_82598, | ||
928 | .clear_vmdq = &ixgbe_clear_vmdq_82598, | ||
929 | .init_rx_addrs = &ixgbe_init_rx_addrs_generic, | ||
930 | .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic, | ||
931 | .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, | ||
932 | .enable_mc = &ixgbe_enable_mc_generic, | ||
933 | .disable_mc = &ixgbe_disable_mc_generic, | ||
934 | .clear_vfta = &ixgbe_clear_vfta_82598, | ||
935 | .set_vfta = &ixgbe_set_vfta_82598, | ||
936 | .setup_fc = &ixgbe_setup_fc_82598, | ||
937 | }; | ||
938 | |||
939 | static struct ixgbe_eeprom_operations eeprom_ops_82598 = { | ||
940 | .init_params = &ixgbe_init_eeprom_params_generic, | ||
941 | .read = &ixgbe_read_eeprom_generic, | ||
942 | .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, | ||
943 | .update_checksum = &ixgbe_update_eeprom_checksum_generic, | ||
944 | }; | ||
945 | |||
946 | static struct ixgbe_phy_operations phy_ops_82598 = { | ||
947 | .identify = &ixgbe_identify_phy_generic, | ||
948 | /* .identify_sfp = &ixgbe_identify_sfp_module_generic, */ | ||
949 | .reset = &ixgbe_reset_phy_generic, | ||
950 | .read_reg = &ixgbe_read_phy_reg_generic, | ||
951 | .write_reg = &ixgbe_write_phy_reg_generic, | ||
952 | .setup_link = &ixgbe_setup_phy_link_generic, | ||
953 | .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, | ||
542 | }; | 954 | }; |
543 | 955 | ||
544 | struct ixgbe_info ixgbe_82598_info = { | 956 | struct ixgbe_info ixgbe_82598_info = { |
545 | .mac = ixgbe_mac_82598EB, | 957 | .mac = ixgbe_mac_82598EB, |
546 | .get_invariants = &ixgbe_get_invariants_82598, | 958 | .get_invariants = &ixgbe_get_invariants_82598, |
547 | .mac_ops = &mac_ops_82598, | 959 | .mac_ops = &mac_ops_82598, |
960 | .eeprom_ops = &eeprom_ops_82598, | ||
961 | .phy_ops = &phy_ops_82598, | ||
548 | }; | 962 | }; |
549 | 963 | ||
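
One detail worth calling out from the ixgbe_82598.c additions above: ixgbe_set_vfta_82598() spreads the 4096 possible VLAN ids across 128 32-bit VFTA registers, with bits 11:5 of the VLAN id selecting the register ("upper seven bits") and bits 4:0 selecting the bit inside it ("lower five bits"). A tiny standalone sketch of that index math, using a hypothetical VLAN id purely for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the index math in ixgbe_set_vfta_82598(): 4096 VLAN ids map
 * onto 128 32-bit VFTA registers. */
static void vfta_index(uint32_t vlan, uint32_t *regindex, uint32_t *bitindex)
{
	*regindex = (vlan >> 5) & 0x7F;  /* upper seven bits pick the register */
	*bitindex = vlan & 0x1F;         /* lower five bits pick the bit */
}

int main(void)
{
	uint32_t reg, bit;

	vfta_index(1234, &reg, &bit);    /* hypothetical VLAN id for illustration */
	printf("VLAN 1234 -> VFTA[%u], bit %u\n", reg, bit);  /* VFTA[38], bit 18 */
	return 0;
}
```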
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index f5b2617111aa..a11ff0db9d25 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -33,20 +33,28 @@ | |||
33 | #include "ixgbe_common.h" | 33 | #include "ixgbe_common.h" |
34 | #include "ixgbe_phy.h" | 34 | #include "ixgbe_phy.h" |
35 | 35 | ||
36 | static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw); | ||
37 | |||
38 | static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw); | 36 | static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw); |
37 | static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); | ||
39 | static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); | 38 | static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); |
40 | static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); | 39 | static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); |
40 | static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); | ||
41 | static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); | ||
42 | static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, | ||
43 | u16 count); | ||
44 | static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); | ||
45 | static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); | ||
46 | static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); | ||
47 | static void ixgbe_release_eeprom(struct ixgbe_hw *hw); | ||
41 | static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw); | 48 | static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw); |
42 | 49 | ||
43 | static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); | 50 | static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index); |
44 | static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); | 51 | static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index); |
45 | static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); | 52 | static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); |
46 | static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr); | 53 | static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr); |
54 | static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); | ||
47 | 55 | ||
48 | /** | 56 | /** |
49 | * ixgbe_start_hw - Prepare hardware for TX/RX | 57 | * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx |
50 | * @hw: pointer to hardware structure | 58 | * @hw: pointer to hardware structure |
51 | * | 59 | * |
52 | * Starts the hardware by filling the bus info structure and media type, clears | 60 | * Starts the hardware by filling the bus info structure and media type, clears |
@@ -54,7 +62,7 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr); | |||
54 | * table, VLAN filter table, calls routine to set up link and flow control | 62 | * table, VLAN filter table, calls routine to set up link and flow control |
55 | * settings, and leaves transmit and receive units disabled and uninitialized | 63 | * settings, and leaves transmit and receive units disabled and uninitialized |
56 | **/ | 64 | **/ |
57 | s32 ixgbe_start_hw(struct ixgbe_hw *hw) | 65 | s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) |
58 | { | 66 | { |
59 | u32 ctrl_ext; | 67 | u32 ctrl_ext; |
60 | 68 | ||
@@ -62,22 +70,22 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw) | |||
62 | hw->phy.media_type = hw->mac.ops.get_media_type(hw); | 70 | hw->phy.media_type = hw->mac.ops.get_media_type(hw); |
63 | 71 | ||
64 | /* Identify the PHY */ | 72 | /* Identify the PHY */ |
65 | ixgbe_identify_phy(hw); | 73 | hw->phy.ops.identify(hw); |
66 | 74 | ||
67 | /* | 75 | /* |
68 | * Store MAC address from RAR0, clear receive address registers, and | 76 | * Store MAC address from RAR0, clear receive address registers, and |
69 | * clear the multicast table | 77 | * clear the multicast table |
70 | */ | 78 | */ |
71 | ixgbe_init_rx_addrs(hw); | 79 | hw->mac.ops.init_rx_addrs(hw); |
72 | 80 | ||
73 | /* Clear the VLAN filter table */ | 81 | /* Clear the VLAN filter table */ |
74 | ixgbe_clear_vfta(hw); | 82 | hw->mac.ops.clear_vfta(hw); |
75 | 83 | ||
76 | /* Set up link */ | 84 | /* Set up link */ |
77 | hw->mac.ops.setup_link(hw); | 85 | hw->mac.ops.setup_link(hw); |
78 | 86 | ||
79 | /* Clear statistics registers */ | 87 | /* Clear statistics registers */ |
80 | ixgbe_clear_hw_cntrs(hw); | 88 | hw->mac.ops.clear_hw_cntrs(hw); |
81 | 89 | ||
82 | /* Set No Snoop Disable */ | 90 | /* Set No Snoop Disable */ |
83 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); | 91 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); |
@@ -92,34 +100,34 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw) | |||
92 | } | 100 | } |
93 | 101 | ||
94 | /** | 102 | /** |
95 | * ixgbe_init_hw - Generic hardware initialization | 103 | * ixgbe_init_hw_generic - Generic hardware initialization |
96 | * @hw: pointer to hardware structure | 104 | * @hw: pointer to hardware structure |
97 | * | 105 | * |
98 | * Initialize the hardware by reseting the hardware, filling the bus info | 106 | * Initialize the hardware by resetting the hardware, filling the bus info |
99 | * structure and media type, clears all on chip counters, initializes receive | 107 | * structure and media type, clears all on chip counters, initializes receive |
100 | * address registers, multicast table, VLAN filter table, calls routine to set | 108 | * address registers, multicast table, VLAN filter table, calls routine to set |
101 | * up link and flow control settings, and leaves transmit and receive units | 109 | * up link and flow control settings, and leaves transmit and receive units |
102 | * disabled and uninitialized | 110 | * disabled and uninitialized |
103 | **/ | 111 | **/ |
104 | s32 ixgbe_init_hw(struct ixgbe_hw *hw) | 112 | s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) |
105 | { | 113 | { |
106 | /* Reset the hardware */ | 114 | /* Reset the hardware */ |
107 | hw->mac.ops.reset(hw); | 115 | hw->mac.ops.reset_hw(hw); |
108 | 116 | ||
109 | /* Start the HW */ | 117 | /* Start the HW */ |
110 | ixgbe_start_hw(hw); | 118 | hw->mac.ops.start_hw(hw); |
111 | 119 | ||
112 | return 0; | 120 | return 0; |
113 | } | 121 | } |
114 | 122 | ||
115 | /** | 123 | /** |
116 | * ixgbe_clear_hw_cntrs - Generic clear hardware counters | 124 | * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters |
117 | * @hw: pointer to hardware structure | 125 | * @hw: pointer to hardware structure |
118 | * | 126 | * |
119 | * Clears all hardware statistics counters by reading them from the hardware | 127 | * Clears all hardware statistics counters by reading them from the hardware |
120 | * Statistics counters are clear on read. | 128 | * Statistics counters are clear on read. |
121 | **/ | 129 | **/ |
122 | static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) | 130 | s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) |
123 | { | 131 | { |
124 | u16 i = 0; | 132 | u16 i = 0; |
125 | 133 | ||
@@ -191,7 +199,36 @@ static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) | |||
191 | } | 199 | } |
192 | 200 | ||
193 | /** | 201 | /** |
194 | * ixgbe_get_mac_addr - Generic get MAC address | 202 | * ixgbe_read_pba_num_generic - Reads part number from EEPROM |
203 | * @hw: pointer to hardware structure | ||
204 | * @pba_num: stores the part number from the EEPROM | ||
205 | * | ||
206 | * Reads the part number from the EEPROM. | ||
207 | **/ | ||
208 | s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) | ||
209 | { | ||
210 | s32 ret_val; | ||
211 | u16 data; | ||
212 | |||
213 | ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); | ||
214 | if (ret_val) { | ||
215 | hw_dbg(hw, "NVM Read Error\n"); | ||
216 | return ret_val; | ||
217 | } | ||
218 | *pba_num = (u32)(data << 16); | ||
219 | |||
220 | ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); | ||
221 | if (ret_val) { | ||
222 | hw_dbg(hw, "NVM Read Error\n"); | ||
223 | return ret_val; | ||
224 | } | ||
225 | *pba_num |= data; | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | /** | ||
231 | * ixgbe_get_mac_addr_generic - Generic get MAC address | ||
195 | * @hw: pointer to hardware structure | 232 | * @hw: pointer to hardware structure |
196 | * @mac_addr: Adapter MAC address | 233 | * @mac_addr: Adapter MAC address |
197 | * | 234 | * |
@@ -199,7 +236,7 @@ static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) | |||
199 | * A reset of the adapter must be performed prior to calling this function | 236 | * A reset of the adapter must be performed prior to calling this function |
200 | * in order for the MAC address to have been loaded from the EEPROM into RAR0 | 237 | * in order for the MAC address to have been loaded from the EEPROM into RAR0 |
201 | **/ | 238 | **/ |
202 | s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) | 239 | s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) |
203 | { | 240 | { |
204 | u32 rar_high; | 241 | u32 rar_high; |
205 | u32 rar_low; | 242 | u32 rar_low; |
@@ -217,30 +254,8 @@ s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) | |||
217 | return 0; | 254 | return 0; |
218 | } | 255 | } |
219 | 256 | ||
220 | s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num) | ||
221 | { | ||
222 | s32 ret_val; | ||
223 | u16 data; | ||
224 | |||
225 | ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM0_PTR, &data); | ||
226 | if (ret_val) { | ||
227 | hw_dbg(hw, "NVM Read Error\n"); | ||
228 | return ret_val; | ||
229 | } | ||
230 | *part_num = (u32)(data << 16); | ||
231 | |||
232 | ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM1_PTR, &data); | ||
233 | if (ret_val) { | ||
234 | hw_dbg(hw, "NVM Read Error\n"); | ||
235 | return ret_val; | ||
236 | } | ||
237 | *part_num |= data; | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | /** | 257 | /** |
243 | * ixgbe_stop_adapter - Generic stop TX/RX units | 258 | * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units |
244 | * @hw: pointer to hardware structure | 259 | * @hw: pointer to hardware structure |
245 | * | 260 | * |
246 | * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, | 261 | * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, |
@@ -248,7 +263,7 @@ s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num) | |||
248 | * the shared code and drivers to determine if the adapter is in a stopped | 263 | * the shared code and drivers to determine if the adapter is in a stopped |
249 | * state and should not touch the hardware. | 264 | * state and should not touch the hardware. |
250 | **/ | 265 | **/ |
251 | s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) | 266 | s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) |
252 | { | 267 | { |
253 | u32 number_of_queues; | 268 | u32 number_of_queues; |
254 | u32 reg_val; | 269 | u32 reg_val; |
@@ -264,6 +279,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) | |||
264 | reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | 279 | reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); |
265 | reg_val &= ~(IXGBE_RXCTRL_RXEN); | 280 | reg_val &= ~(IXGBE_RXCTRL_RXEN); |
266 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); | 281 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); |
282 | IXGBE_WRITE_FLUSH(hw); | ||
267 | msleep(2); | 283 | msleep(2); |
268 | 284 | ||
269 | /* Clear interrupt mask to stop from interrupts being generated */ | 285 | /* Clear interrupt mask to stop from interrupts being generated */ |
@@ -273,7 +289,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) | |||
273 | IXGBE_READ_REG(hw, IXGBE_EICR); | 289 | IXGBE_READ_REG(hw, IXGBE_EICR); |
274 | 290 | ||
275 | /* Disable the transmit unit. Each queue must be disabled. */ | 291 | /* Disable the transmit unit. Each queue must be disabled. */ |
276 | number_of_queues = hw->mac.num_tx_queues; | 292 | number_of_queues = hw->mac.max_tx_queues; |
277 | for (i = 0; i < number_of_queues; i++) { | 293 | for (i = 0; i < number_of_queues; i++) { |
278 | reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); | 294 | reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); |
279 | if (reg_val & IXGBE_TXDCTL_ENABLE) { | 295 | if (reg_val & IXGBE_TXDCTL_ENABLE) { |
@@ -282,15 +298,22 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) | |||
282 | } | 298 | } |
283 | } | 299 | } |
284 | 300 | ||
301 | /* | ||
302 | * Prevent the PCI-E bus from from hanging by disabling PCI-E master | ||
303 | * access and verify no pending requests | ||
304 | */ | ||
305 | if (ixgbe_disable_pcie_master(hw) != 0) | ||
306 | hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); | ||
307 | |||
285 | return 0; | 308 | return 0; |
286 | } | 309 | } |
287 | 310 | ||
288 | /** | 311 | /** |
289 | * ixgbe_led_on - Turns on the software controllable LEDs. | 312 | * ixgbe_led_on_generic - Turns on the software controllable LEDs. |
290 | * @hw: pointer to hardware structure | 313 | * @hw: pointer to hardware structure |
291 | * @index: led number to turn on | 314 | * @index: led number to turn on |
292 | **/ | 315 | **/ |
293 | s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) | 316 | s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) |
294 | { | 317 | { |
295 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | 318 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); |
296 | 319 | ||
@@ -304,11 +327,11 @@ s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) | |||
304 | } | 327 | } |
305 | 328 | ||
306 | /** | 329 | /** |
307 | * ixgbe_led_off - Turns off the software controllable LEDs. | 330 | * ixgbe_led_off_generic - Turns off the software controllable LEDs. |
308 | * @hw: pointer to hardware structure | 331 | * @hw: pointer to hardware structure |
309 | * @index: led number to turn off | 332 | * @index: led number to turn off |
310 | **/ | 333 | **/ |
311 | s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) | 334 | s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) |
312 | { | 335 | { |
313 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | 336 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); |
314 | 337 | ||
@@ -321,15 +344,14 @@ s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) | |||
321 | return 0; | 344 | return 0; |
322 | } | 345 | } |
323 | 346 | ||
324 | |||
325 | /** | 347 | /** |
326 | * ixgbe_init_eeprom - Initialize EEPROM params | 348 | * ixgbe_init_eeprom_params_generic - Initialize EEPROM params |
327 | * @hw: pointer to hardware structure | 349 | * @hw: pointer to hardware structure |
328 | * | 350 | * |
329 | * Initializes the EEPROM parameters ixgbe_eeprom_info within the | 351 | * Initializes the EEPROM parameters ixgbe_eeprom_info within the |
330 | * ixgbe_hw struct in order to set up EEPROM access. | 352 | * ixgbe_hw struct in order to set up EEPROM access. |
331 | **/ | 353 | **/ |
332 | s32 ixgbe_init_eeprom(struct ixgbe_hw *hw) | 354 | s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) |
333 | { | 355 | { |
334 | struct ixgbe_eeprom_info *eeprom = &hw->eeprom; | 356 | struct ixgbe_eeprom_info *eeprom = &hw->eeprom; |
335 | u32 eec; | 357 | u32 eec; |
@@ -337,6 +359,9 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *hw) | |||
337 | 359 | ||
338 | if (eeprom->type == ixgbe_eeprom_uninitialized) { | 360 | if (eeprom->type == ixgbe_eeprom_uninitialized) { |
339 | eeprom->type = ixgbe_eeprom_none; | 361 | eeprom->type = ixgbe_eeprom_none; |
362 | /* Set default semaphore delay to 10ms which is a well | ||
363 | * tested value */ | ||
364 | eeprom->semaphore_delay = 10; | ||
340 | 365 | ||
341 | /* | 366 | /* |
342 | * Check for EEPROM present first. | 367 | * Check for EEPROM present first. |
@@ -369,18 +394,85 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *hw) | |||
369 | } | 394 | } |
370 | 395 | ||
371 | /** | 396 | /** |
372 | * ixgbe_read_eeprom - Read EEPROM word using EERD | 397 | * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang |
398 | * @hw: pointer to hardware structure | ||
399 | * @offset: offset within the EEPROM to be read | ||
400 | * @data: read 16 bit value from EEPROM | ||
401 | * | ||
402 | * Reads 16 bit value from EEPROM through bit-bang method | ||
403 | **/ | ||
404 | s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, | ||
405 | u16 *data) | ||
406 | { | ||
407 | s32 status; | ||
408 | u16 word_in; | ||
409 | u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; | ||
410 | |||
411 | hw->eeprom.ops.init_params(hw); | ||
412 | |||
413 | if (offset >= hw->eeprom.word_size) { | ||
414 | status = IXGBE_ERR_EEPROM; | ||
415 | goto out; | ||
416 | } | ||
417 | |||
418 | /* Prepare the EEPROM for reading */ | ||
419 | status = ixgbe_acquire_eeprom(hw); | ||
420 | |||
421 | if (status == 0) { | ||
422 | if (ixgbe_ready_eeprom(hw) != 0) { | ||
423 | ixgbe_release_eeprom(hw); | ||
424 | status = IXGBE_ERR_EEPROM; | ||
425 | } | ||
426 | } | ||
427 | |||
428 | if (status == 0) { | ||
429 | ixgbe_standby_eeprom(hw); | ||
430 | |||
431 | /* | ||
432 | * Some SPI eeproms use the 8th address bit embedded in the | ||
433 | * opcode | ||
434 | */ | ||
435 | if ((hw->eeprom.address_bits == 8) && (offset >= 128)) | ||
436 | read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; | ||
437 | |||
438 | /* Send the READ command (opcode + addr) */ | ||
439 | ixgbe_shift_out_eeprom_bits(hw, read_opcode, | ||
440 | IXGBE_EEPROM_OPCODE_BITS); | ||
441 | ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2), | ||
442 | hw->eeprom.address_bits); | ||
443 | |||
444 | /* Read the data. */ | ||
445 | word_in = ixgbe_shift_in_eeprom_bits(hw, 16); | ||
446 | *data = (word_in >> 8) | (word_in << 8); | ||
447 | |||
448 | /* End this read operation */ | ||
449 | ixgbe_release_eeprom(hw); | ||
450 | } | ||
451 | |||
452 | out: | ||
453 | return status; | ||
454 | } | ||
455 | |||
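The swap applied to word_in above exists because the SPI EEPROM clocks its high byte out first, so the raw 16-bit value arrives byte-reversed relative to host order. A minimal user-space sketch of the same operation (illustrative helper name, not driver code):

        #include <stdint.h>
        #include <stdio.h>

        /* Mirror of the swap applied to the raw SPI word: the device sends
         * the high byte first, so the two bytes arrive reversed. */
        static uint16_t eeprom_word_to_host(uint16_t word_in)
        {
                return (uint16_t)((word_in >> 8) | (word_in << 8));
        }

        int main(void)
        {
                /* A raw word of 0x34c3 read over SPI corresponds to 0xc334. */
                printf("0x%04x\n", (unsigned)eeprom_word_to_host(0x34c3));
                return 0;
        }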
456 | /** | ||
457 | * ixgbe_read_eeprom_generic - Read EEPROM word using EERD | ||
373 | * @hw: pointer to hardware structure | 458 | * @hw: pointer to hardware structure |
374 | * @offset: offset of word in the EEPROM to read | 459 | * @offset: offset of word in the EEPROM to read |
375 | * @data: word read from the EEPROM | 460 | * @data: word read from the EEPROM |
376 | * | 461 | * |
377 | * Reads a 16 bit word from the EEPROM using the EERD register. | 462 | * Reads a 16 bit word from the EEPROM using the EERD register. |
378 | **/ | 463 | **/ |
379 | s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) | 464 | s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) |
380 | { | 465 | { |
381 | u32 eerd; | 466 | u32 eerd; |
382 | s32 status; | 467 | s32 status; |
383 | 468 | ||
469 | hw->eeprom.ops.init_params(hw); | ||
470 | |||
471 | if (offset >= hw->eeprom.word_size) { | ||
472 | status = IXGBE_ERR_EEPROM; | ||
473 | goto out; | ||
474 | } | ||
475 | |||
384 | eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) + | 476 | eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) + |
385 | IXGBE_EEPROM_READ_REG_START; | 477 | IXGBE_EEPROM_READ_REG_START; |
386 | 478 | ||
@@ -393,6 +485,7 @@ s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) | |||
393 | else | 485 | else |
394 | hw_dbg(hw, "Eeprom read timed out\n"); | 486 | hw_dbg(hw, "Eeprom read timed out\n"); |
395 | 487 | ||
488 | out: | ||
396 | return status; | 489 | return status; |
397 | } | 490 | } |
398 | 491 | ||
@@ -420,6 +513,58 @@ static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw) | |||
420 | } | 513 | } |
421 | 514 | ||
422 | /** | 515 | /** |
516 | * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang | ||
517 | * @hw: pointer to hardware structure | ||
518 | * | ||
519 | * Prepares EEPROM for access using bit-bang method. This function should | ||
520 | * be called before issuing a command to the EEPROM. | ||
521 | **/ | ||
522 | static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) | ||
523 | { | ||
524 | s32 status = 0; | ||
525 | u32 eec; | ||
526 | u32 i; | ||
527 | |||
528 | if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) | ||
529 | status = IXGBE_ERR_SWFW_SYNC; | ||
530 | |||
531 | if (status == 0) { | ||
532 | eec = IXGBE_READ_REG(hw, IXGBE_EEC); | ||
533 | |||
534 | /* Request EEPROM Access */ | ||
535 | eec |= IXGBE_EEC_REQ; | ||
536 | IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); | ||
537 | |||
538 | for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { | ||
539 | eec = IXGBE_READ_REG(hw, IXGBE_EEC); | ||
540 | if (eec & IXGBE_EEC_GNT) | ||
541 | break; | ||
542 | udelay(5); | ||
543 | } | ||
544 | |||
545 | /* Release if grant not acquired */ | ||
546 | if (!(eec & IXGBE_EEC_GNT)) { | ||
547 | eec &= ~IXGBE_EEC_REQ; | ||
548 | IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); | ||
549 | hw_dbg(hw, "Could not acquire EEPROM grant\n"); | ||
550 | |||
551 | ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); | ||
552 | status = IXGBE_ERR_EEPROM; | ||
553 | } | ||
554 | } | ||
555 | |||
556 | /* Setup EEPROM for Read/Write */ | ||
557 | if (status == 0) { | ||
558 | /* Clear CS and SK */ | ||
559 | eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); | ||
560 | IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); | ||
561 | IXGBE_WRITE_FLUSH(hw); | ||
562 | udelay(1); | ||
563 | } | ||
564 | return status; | ||
565 | } | ||
566 | |||
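The REQ/GNT handshake in ixgbe_acquire_eeprom is a bounded-poll pattern: request the resource, poll for the grant a fixed number of times, and back the request out on failure. A standalone sketch under assumed bit positions, with a stand-in register accessor rather than the driver's MMIO helpers:

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define EEC_REQ        (1u << 6)   /* illustrative bit positions only */
        #define EEC_GNT        (1u << 7)
        #define GRANT_ATTEMPTS 1000

        static uint32_t fake_eec;   /* stand-in for the memory-mapped EEC register */

        static uint32_t read_eec(void)
        {
                /* Pretend firmware grants the request as soon as it is seen. */
                if (fake_eec & EEC_REQ)
                        fake_eec |= EEC_GNT;
                return fake_eec;
        }

        static bool acquire_eeprom_grant(void)
        {
                uint32_t eec;
                int i;

                fake_eec |= EEC_REQ;                    /* request access */
                for (i = 0; i < GRANT_ATTEMPTS; i++) {  /* bounded wait for grant */
                        eec = read_eec();
                        if (eec & EEC_GNT)
                                return true;
                }
                fake_eec &= ~EEC_REQ;                   /* back off on failure */
                return false;
        }

        int main(void)
        {
                printf("grant %s\n", acquire_eeprom_grant() ? "acquired" : "denied");
                return 0;
        }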
567 | /** | ||
423 | * ixgbe_get_eeprom_semaphore - Get hardware semaphore | 568 | * ixgbe_get_eeprom_semaphore - Get hardware semaphore |
424 | * @hw: pointer to hardware structure | 569 | * @hw: pointer to hardware structure |
425 | * | 570 | * |
@@ -503,6 +648,217 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) | |||
503 | } | 648 | } |
504 | 649 | ||
505 | /** | 650 | /** |
651 | * ixgbe_ready_eeprom - Polls for EEPROM ready | ||
652 | * @hw: pointer to hardware structure | ||
653 | **/ | ||
654 | static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) | ||
655 | { | ||
656 | s32 status = 0; | ||
657 | u16 i; | ||
658 | u8 spi_stat_reg; | ||
659 | |||
660 | /* | ||
661 | * Read "Status Register" repeatedly until the LSB is cleared. The | ||
662 | * EEPROM will signal that the command has been completed by clearing | ||
663 | * bit 0 of the internal status register. If it's not cleared within | ||
664 | * 5 milliseconds, then error out. | ||
665 | */ | ||
666 | for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { | ||
667 | ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, | ||
668 | IXGBE_EEPROM_OPCODE_BITS); | ||
669 | spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); | ||
670 | if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) | ||
671 | break; | ||
672 | |||
673 | udelay(5); | ||
674 | ixgbe_standby_eeprom(hw); | ||
675 | }; | ||
676 | |||
677 | /* | ||
678 | * On some parts, SPI write time could vary from 0-20mSec on 3.3V | ||
679 | * devices (and only 0-5mSec on 5V devices) | ||
680 | */ | ||
681 | if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { | ||
682 | hw_dbg(hw, "SPI EEPROM Status error\n"); | ||
683 | status = IXGBE_ERR_EEPROM; | ||
684 | } | ||
685 | |||
686 | return status; | ||
687 | } | ||
688 | |||
689 | /** | ||
690 | * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state | ||
691 | * @hw: pointer to hardware structure | ||
692 | **/ | ||
693 | static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) | ||
694 | { | ||
695 | u32 eec; | ||
696 | |||
697 | eec = IXGBE_READ_REG(hw, IXGBE_EEC); | ||
698 | |||
699 | /* Toggle CS to flush commands */ | ||
700 | eec |= IXGBE_EEC_CS; | ||
701 | IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); | ||
702 | IXGBE_WRITE_FLUSH(hw); | ||
703 | udelay(1); | ||
704 | eec &= ~IXGBE_EEC_CS; | ||
705 | IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); | ||
706 | IXGBE_WRITE_FLUSH(hw); | ||
707 | udelay(1); | ||
708 | } | ||
709 | |||
710 | /** | ||
711 | * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. | ||
712 | * @hw: pointer to hardware structure | ||
713 | * @data: data to send to the EEPROM | ||
714 | * @count: number of bits to shift out | ||
715 | **/ | ||
716 | static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, | ||
717 | u16 count) | ||
718 | { | ||
719 | u32 eec; | ||
720 | u32 mask; | ||
721 | u32 i; | ||
722 | |||
723 | eec = IXGBE_READ_REG(hw, IXGBE_EEC); | ||
724 | |||
725 | /* | ||
726 | * Mask is used to shift "count" bits of "data" out to the EEPROM | ||
727 | * one bit at a time. Determine the starting bit based on count | ||
728 | */ | ||
729 | mask = 0x01 << (count - 1); | ||
730 | |||
731 | for (i = 0; i < count; i++) { | ||
732 | /* | ||
733 | * A "1" is shifted out to the EEPROM by setting bit "DI" to a | ||
734 | * "1", and then raising and then lowering the clock (the SK | ||
735 | * bit controls the clock input to the EEPROM). A "0" is | ||
736 | * shifted out to the EEPROM by setting "DI" to "0" and then | ||
737 | * raising and then lowering the clock. | ||
738 | */ | ||
739 | if (data & mask) | ||
740 | eec |= IXGBE_EEC_DI; | ||
741 | else | ||
742 | eec &= ~IXGBE_EEC_DI; | ||
743 | |||
744 | IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); | ||
745 | IXGBE_WRITE_FLUSH(hw); | ||
746 | |||
747 | udelay(1); | ||
748 | |||
749 | ixgbe_raise_eeprom_clk(hw, &eec); | ||
750 | ixgbe_lower_eeprom_clk(hw, &eec); | ||
751 | |||
752 | /* | ||
753 | * Shift mask to signify next bit of data to shift in to the | ||
754 | * EEPROM | ||
755 | */ | ||
756 | mask = mask >> 1; | ||
757 | }; | ||
758 | |||
759 | /* We leave the "DI" bit set to "0" when we leave this routine. */ | ||
760 | eec &= ~IXGBE_EEC_DI; | ||
761 | IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); | ||
762 | IXGBE_WRITE_FLUSH(hw); | ||
763 | } | ||
764 | |||
765 | /** | ||
766 | * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM | ||
767 | * @hw: pointer to hardware structure | ||
768 | **/ | ||
769 | static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) | ||
770 | { | ||
771 | u32 eec; | ||
772 | u32 i; | ||
773 | u16 data = 0; | ||
774 | |||
775 | /* | ||
776 | * In order to read a register from the EEPROM, we need to shift | ||
777 | * 'count' bits in from the EEPROM. Bits are "shifted in" by raising | ||
778 | * the clock input to the EEPROM (setting the SK bit), and then reading | ||
779 | * the value of the "DO" bit. During this "shifting in" process the | ||
780 | * "DI" bit should always be clear. | ||
781 | */ | ||
782 | eec = IXGBE_READ_REG(hw, IXGBE_EEC); | ||
783 | |||
784 | eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); | ||
785 | |||
786 | for (i = 0; i < count; i++) { | ||
787 | data = data << 1; | ||
788 | ixgbe_raise_eeprom_clk(hw, &eec); | ||
789 | |||
790 | eec = IXGBE_READ_REG(hw, IXGBE_EEC); | ||
791 | |||
792 | eec &= ~(IXGBE_EEC_DI); | ||
793 | if (eec & IXGBE_EEC_DO) | ||
794 | data |= 1; | ||
795 | |||
796 | ixgbe_lower_eeprom_clk(hw, &eec); | ||
797 | } | ||
798 | |||
799 | return data; | ||
800 | } | ||
801 | |||
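ixgbe_shift_out_eeprom_bits and ixgbe_shift_in_eeprom_bits are plain MSB-first bit-banging over the DI/DO pins, with SK providing the clock. A host-side round trip with an array standing in for the wire shows the same bit ordering (sketch only; helper names are made up):

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Shift 'count' bits of 'data' out MSB first, as the driver does on DI. */
        static void shift_out(uint16_t data, int count, int *wire, int *pos)
        {
                uint16_t mask = 1u << (count - 1);
                int i;

                for (i = 0; i < count; i++) {
                        wire[(*pos)++] = (data & mask) ? 1 : 0;
                        mask >>= 1;
                }
        }

        /* Shift 'count' bits back in, MSB first, as the driver does on DO. */
        static uint16_t shift_in(int count, const int *wire, int *pos)
        {
                uint16_t data = 0;
                int i;

                for (i = 0; i < count; i++) {
                        data <<= 1;
                        if (wire[(*pos)++])
                                data |= 1;
                }
                return data;
        }

        int main(void)
        {
                int wire[16], pos = 0;
                uint16_t out = 0xbeef, in;

                shift_out(out, 16, wire, &pos);
                pos = 0;
                in = shift_in(16, wire, &pos);
                assert(in == out);
                printf("round trip 0x%04x -> 0x%04x\n", (unsigned)out, (unsigned)in);
                return 0;
        }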
802 | /** | ||
803 | * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. | ||
804 | * @hw: pointer to hardware structure | ||
805 | * @eec: EEC register's current value | ||
806 | **/ | ||
807 | static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) | ||
808 | { | ||
809 | /* | ||
810 | * Raise the clock input to the EEPROM | ||
811 | * (setting the SK bit), then delay | ||
812 | */ | ||
813 | *eec = *eec | IXGBE_EEC_SK; | ||
814 | IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); | ||
815 | IXGBE_WRITE_FLUSH(hw); | ||
816 | udelay(1); | ||
817 | } | ||
818 | |||
819 | /** | ||
820 | * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. | ||
821 | * @hw: pointer to hardware structure | ||
822 | * @eec: EEC register's current value ||
823 | **/ | ||
824 | static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) | ||
825 | { | ||
826 | /* | ||
827 | * Lower the clock input to the EEPROM (clearing the SK bit), then | ||
828 | * delay | ||
829 | */ | ||
830 | *eec = *eec & ~IXGBE_EEC_SK; | ||
831 | IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); | ||
832 | IXGBE_WRITE_FLUSH(hw); | ||
833 | udelay(1); | ||
834 | } | ||
835 | |||
836 | /** | ||
837 | * ixgbe_release_eeprom - Release EEPROM, release semaphores | ||
838 | * @hw: pointer to hardware structure | ||
839 | **/ | ||
840 | static void ixgbe_release_eeprom(struct ixgbe_hw *hw) | ||
841 | { | ||
842 | u32 eec; | ||
843 | |||
844 | eec = IXGBE_READ_REG(hw, IXGBE_EEC); | ||
845 | |||
846 | eec |= IXGBE_EEC_CS; /* Pull CS high */ | ||
847 | eec &= ~IXGBE_EEC_SK; /* Lower SCK */ | ||
848 | |||
849 | IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); | ||
850 | IXGBE_WRITE_FLUSH(hw); | ||
851 | |||
852 | udelay(1); | ||
853 | |||
854 | /* Stop requesting EEPROM access */ | ||
855 | eec &= ~IXGBE_EEC_REQ; | ||
856 | IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); | ||
857 | |||
858 | ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); | ||
859 | } | ||
860 | |||
861 | /** | ||
506 | * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum | 862 | * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum |
507 | * @hw: pointer to hardware structure | 863 | * @hw: pointer to hardware structure |
508 | **/ | 864 | **/ |
@@ -517,7 +873,7 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) | |||
517 | 873 | ||
518 | /* Include 0x0-0x3F in the checksum */ | 874 | /* Include 0x0-0x3F in the checksum */ |
519 | for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { | 875 | for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { |
520 | if (ixgbe_read_eeprom(hw, i, &word) != 0) { | 876 | if (hw->eeprom.ops.read(hw, i, &word) != 0) { |
521 | hw_dbg(hw, "EEPROM read failed\n"); | 877 | hw_dbg(hw, "EEPROM read failed\n"); |
522 | break; | 878 | break; |
523 | } | 879 | } |
@@ -526,15 +882,15 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) | |||
526 | 882 | ||
527 | /* Include all data from pointers except for the fw pointer */ | 883 | /* Include all data from pointers except for the fw pointer */ |
528 | for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { | 884 | for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { |
529 | ixgbe_read_eeprom(hw, i, &pointer); | 885 | hw->eeprom.ops.read(hw, i, &pointer); |
530 | 886 | ||
531 | /* Make sure the pointer seems valid */ | 887 | /* Make sure the pointer seems valid */ |
532 | if (pointer != 0xFFFF && pointer != 0) { | 888 | if (pointer != 0xFFFF && pointer != 0) { |
533 | ixgbe_read_eeprom(hw, pointer, &length); | 889 | hw->eeprom.ops.read(hw, pointer, &length); |
534 | 890 | ||
535 | if (length != 0xFFFF && length != 0) { | 891 | if (length != 0xFFFF && length != 0) { |
536 | for (j = pointer+1; j <= pointer+length; j++) { | 892 | for (j = pointer+1; j <= pointer+length; j++) { |
537 | ixgbe_read_eeprom(hw, j, &word); | 893 | hw->eeprom.ops.read(hw, j, &word); |
538 | checksum += word; | 894 | checksum += word; |
539 | } | 895 | } |
540 | } | 896 | } |
@@ -547,14 +903,15 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) | |||
547 | } | 903 | } |
548 | 904 | ||
549 | /** | 905 | /** |
550 | * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum | 906 | * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum |
551 | * @hw: pointer to hardware structure | 907 | * @hw: pointer to hardware structure |
552 | * @checksum_val: calculated checksum | 908 | * @checksum_val: calculated checksum |
553 | * | 909 | * |
554 | * Performs checksum calculation and validates the EEPROM checksum. If the | 910 | * Performs checksum calculation and validates the EEPROM checksum. If the |
555 | * caller does not need checksum_val, the value can be NULL. | 911 | * caller does not need checksum_val, the value can be NULL. |
556 | **/ | 912 | **/ |
557 | s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) | 913 | s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, |
914 | u16 *checksum_val) | ||
558 | { | 915 | { |
559 | s32 status; | 916 | s32 status; |
560 | u16 checksum; | 917 | u16 checksum; |
@@ -565,12 +922,12 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) | |||
565 | * not continue or we could be in for a very long wait while every | 922 | * not continue or we could be in for a very long wait while every |
566 | * EEPROM read fails | 923 | * EEPROM read fails |
567 | */ | 924 | */ |
568 | status = ixgbe_read_eeprom(hw, 0, &checksum); | 925 | status = hw->eeprom.ops.read(hw, 0, &checksum); |
569 | 926 | ||
570 | if (status == 0) { | 927 | if (status == 0) { |
571 | checksum = ixgbe_calc_eeprom_checksum(hw); | 928 | checksum = ixgbe_calc_eeprom_checksum(hw); |
572 | 929 | ||
573 | ixgbe_read_eeprom(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); | 930 | hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); |
574 | 931 | ||
575 | /* | 932 | /* |
576 | * Verify read checksum from EEPROM is the same as | 933 | * Verify read checksum from EEPROM is the same as |
@@ -590,6 +947,33 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) | |||
590 | } | 947 | } |
591 | 948 | ||
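The rule behind ixgbe_calc_eeprom_checksum and the validation above is that the stored checksum word plus the sum of the covered words equals a fixed constant, assumed here to be the driver's IXGBE_EEPROM_SUM value of 0xBABA from ixgbe_type.h. A small sketch of that arithmetic (all values are examples):

        #include <stdint.h>
        #include <stdio.h>

        #define EEPROM_SUM 0xBABA   /* assumed value of IXGBE_EEPROM_SUM */

        /* Given the 16-bit sum of every word covered by the checksum except the
         * checksum word itself, compute the value to store so that adding the
         * stored word back in yields EEPROM_SUM (arithmetic modulo 2^16). */
        static uint16_t eeprom_checksum(uint16_t partial_sum)
        {
                return (uint16_t)(EEPROM_SUM - partial_sum);
        }

        int main(void)
        {
                uint16_t partial = 0x1234;
                uint16_t stored = eeprom_checksum(partial);

                /* Validation then checks partial + stored == EEPROM_SUM. */
                printf("stored 0x%04x, total 0x%04x\n",
                       (unsigned)stored, (unsigned)(uint16_t)(partial + stored));
                return 0;
        }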
592 | /** | 949 | /** |
950 | * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum | ||
951 | * @hw: pointer to hardware structure | ||
952 | **/ | ||
953 | s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) | ||
954 | { | ||
955 | s32 status; | ||
956 | u16 checksum; | ||
957 | |||
958 | /* | ||
959 | * Read the first word from the EEPROM. If this times out or fails, do | ||
960 | * not continue or we could be in for a very long wait while every | ||
961 | * EEPROM read fails | ||
962 | */ | ||
963 | status = hw->eeprom.ops.read(hw, 0, &checksum); | ||
964 | |||
965 | if (status == 0) { | ||
966 | checksum = ixgbe_calc_eeprom_checksum(hw); | ||
967 | status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, | ||
968 | checksum); | ||
969 | } else { | ||
970 | hw_dbg(hw, "EEPROM read failed\n"); | ||
971 | } | ||
972 | |||
973 | return status; | ||
974 | } | ||
975 | |||
976 | /** | ||
593 | * ixgbe_validate_mac_addr - Validate MAC address | 977 | * ixgbe_validate_mac_addr - Validate MAC address |
594 | * @mac_addr: pointer to MAC address. | 978 | * @mac_addr: pointer to MAC address. |
595 | * | 979 | * |
@@ -607,58 +991,137 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr) | |||
607 | status = IXGBE_ERR_INVALID_MAC_ADDR; | 991 | status = IXGBE_ERR_INVALID_MAC_ADDR; |
608 | /* Reject the zero address */ | 992 | /* Reject the zero address */ |
609 | else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && | 993 | else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && |
610 | mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) | 994 | mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) |
611 | status = IXGBE_ERR_INVALID_MAC_ADDR; | 995 | status = IXGBE_ERR_INVALID_MAC_ADDR; |
612 | 996 | ||
613 | return status; | 997 | return status; |
614 | } | 998 | } |
615 | 999 | ||
616 | /** | 1000 | /** |
617 | * ixgbe_set_rar - Set RX address register | 1001 | * ixgbe_set_rar_generic - Set Rx address register |
618 | * @hw: pointer to hardware structure | 1002 | * @hw: pointer to hardware structure |
619 | * @addr: Address to put into receive address register | ||
620 | * @index: Receive address register to write | 1003 | * @index: Receive address register to write |
621 | * @vind: Vind to set RAR to | 1004 | * @addr: Address to put into receive address register |
1005 | * @vmdq: VMDq "set" or "pool" index | ||
622 | * @enable_addr: set flag that address is active | 1006 | * @enable_addr: set flag that address is active |
623 | * | 1007 | * |
624 | * Puts an ethernet address into a receive address register. | 1008 | * Puts an ethernet address into a receive address register. |
625 | **/ | 1009 | **/ |
626 | s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, | 1010 | s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, |
627 | u32 enable_addr) | 1011 | u32 enable_addr) |
628 | { | 1012 | { |
629 | u32 rar_low, rar_high; | 1013 | u32 rar_low, rar_high; |
1014 | u32 rar_entries = hw->mac.num_rar_entries; | ||
1015 | |||
1016 | /* setup VMDq pool selection before this RAR gets enabled */ | ||
1017 | hw->mac.ops.set_vmdq(hw, index, vmdq); | ||
630 | 1018 | ||
1019 | /* Make sure we are using a valid rar index range */ | ||
1020 | if (index < rar_entries) { | ||
631 | /* | 1021 | /* |
632 | * HW expects these in little endian so we reverse the byte order from | 1022 | * HW expects these in little endian so we reverse the byte |
633 | * network order (big endian) to little endian | 1023 | * order from network order (big endian) to little endian |
634 | */ | 1024 | */ |
635 | rar_low = ((u32)addr[0] | | 1025 | rar_low = ((u32)addr[0] | |
636 | ((u32)addr[1] << 8) | | 1026 | ((u32)addr[1] << 8) | |
637 | ((u32)addr[2] << 16) | | 1027 | ((u32)addr[2] << 16) | |
638 | ((u32)addr[3] << 24)); | 1028 | ((u32)addr[3] << 24)); |
639 | 1029 | /* | |
640 | rar_high = ((u32)addr[4] | | 1030 | * Some parts put the VMDq setting in the extra RAH bits, |
641 | ((u32)addr[5] << 8) | | 1031 | * so save everything except the lower 16 bits that hold part |
642 | ((vind << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK)); | 1032 | * of the address and the address valid bit. |
1033 | */ | ||
1034 | rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); | ||
1035 | rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); | ||
1036 | rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); | ||
643 | 1037 | ||
644 | if (enable_addr != 0) | 1038 | if (enable_addr != 0) |
645 | rar_high |= IXGBE_RAH_AV; | 1039 | rar_high |= IXGBE_RAH_AV; |
646 | 1040 | ||
647 | IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); | 1041 | IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); |
648 | IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); | 1042 | IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); |
1043 | } else { | ||
1044 | hw_dbg(hw, "RAR index %d is out of range.\n", index); | ||
1045 | } | ||
1046 | |||
1047 | return 0; | ||
1048 | } | ||
1049 | |||
1050 | /** | ||
1051 | * ixgbe_clear_rar_generic - Remove Rx address register | ||
1052 | * @hw: pointer to hardware structure | ||
1053 | * @index: Receive address register to write | ||
1054 | * | ||
1055 | * Clears an ethernet address from a receive address register. | ||
1056 | **/ | ||
1057 | s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) | ||
1058 | { | ||
1059 | u32 rar_high; | ||
1060 | u32 rar_entries = hw->mac.num_rar_entries; | ||
1061 | |||
1062 | /* Make sure we are using a valid rar index range */ | ||
1063 | if (index < rar_entries) { | ||
1064 | /* | ||
1065 | * Some parts put the VMDq setting in the extra RAH bits, | ||
1066 | * so save everything except the lower 16 bits that hold part | ||
1067 | * of the address and the address valid bit. | ||
1068 | */ | ||
1069 | rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); | ||
1070 | rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); | ||
1071 | |||
1072 | IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); | ||
1073 | IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); | ||
1074 | } else { | ||
1075 | hw_dbg(hw, "RAR index %d is out of range.\n", index); | ||
1076 | } | ||
1077 | |||
1078 | /* clear VMDq pool/queue selection for this RAR */ | ||
1079 | hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); | ||
649 | 1080 | ||
650 | return 0; | 1081 | return 0; |
651 | } | 1082 | } |
652 | 1083 | ||
653 | /** | 1084 | /** |
654 | * ixgbe_init_rx_addrs - Initializes receive address filters. | 1085 | * ixgbe_enable_rar - Enable Rx address register |
1086 | * @hw: pointer to hardware structure | ||
1087 | * @index: index into the RAR table | ||
1088 | * | ||
1089 | * Enables the selected receive address register. ||
1090 | **/ | ||
1091 | static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index) | ||
1092 | { | ||
1093 | u32 rar_high; | ||
1094 | |||
1095 | rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); | ||
1096 | rar_high |= IXGBE_RAH_AV; | ||
1097 | IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); | ||
1098 | } | ||
1099 | |||
1100 | /** | ||
1101 | * ixgbe_disable_rar - Disable Rx address register | ||
1102 | * @hw: pointer to hardware structure | ||
1103 | * @index: index into the RAR table | ||
1104 | * | ||
1105 | * Disables the selected receive address register. ||
1106 | **/ | ||
1107 | static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index) | ||
1108 | { | ||
1109 | u32 rar_high; | ||
1110 | |||
1111 | rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); | ||
1112 | rar_high &= (~IXGBE_RAH_AV); | ||
1113 | IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); | ||
1114 | } | ||
1115 | |||
1116 | /** | ||
1117 | * ixgbe_init_rx_addrs_generic - Initializes receive address filters. | ||
655 | * @hw: pointer to hardware structure | 1118 | * @hw: pointer to hardware structure |
656 | * | 1119 | * |
657 | * Places the MAC address in receive address register 0 and clears the rest | 1120 | * Places the MAC address in receive address register 0 and clears the rest |
658 | * of the receive addresss registers. Clears the multicast table. Assumes | 1121 | * of the receive address registers. Clears the multicast table. Assumes |
659 | * the receiver is in reset when the routine is called. | 1122 | * the receiver is in reset when the routine is called. |
660 | **/ | 1123 | **/ |
661 | static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) | 1124 | s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) |
662 | { | 1125 | { |
663 | u32 i; | 1126 | u32 i; |
664 | u32 rar_entries = hw->mac.num_rar_entries; | 1127 | u32 rar_entries = hw->mac.num_rar_entries; |
@@ -671,7 +1134,7 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) | |||
671 | if (ixgbe_validate_mac_addr(hw->mac.addr) == | 1134 | if (ixgbe_validate_mac_addr(hw->mac.addr) == |
672 | IXGBE_ERR_INVALID_MAC_ADDR) { | 1135 | IXGBE_ERR_INVALID_MAC_ADDR) { |
673 | /* Get the MAC address from the RAR0 for later reference */ | 1136 | /* Get the MAC address from the RAR0 for later reference */ |
674 | ixgbe_get_mac_addr(hw, hw->mac.addr); | 1137 | hw->mac.ops.get_mac_addr(hw, hw->mac.addr); |
675 | 1138 | ||
676 | hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", | 1139 | hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", |
677 | hw->mac.addr[0], hw->mac.addr[1], | 1140 | hw->mac.addr[0], hw->mac.addr[1], |
@@ -687,13 +1150,14 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) | |||
687 | hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], | 1150 | hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], |
688 | hw->mac.addr[4], hw->mac.addr[5]); | 1151 | hw->mac.addr[4], hw->mac.addr[5]); |
689 | 1152 | ||
690 | ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); | 1153 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); |
691 | } | 1154 | } |
1155 | hw->addr_ctrl.overflow_promisc = 0; | ||
692 | 1156 | ||
693 | hw->addr_ctrl.rar_used_count = 1; | 1157 | hw->addr_ctrl.rar_used_count = 1; |
694 | 1158 | ||
695 | /* Zero out the other receive addresses. */ | 1159 | /* Zero out the other receive addresses. */ |
696 | hw_dbg(hw, "Clearing RAR[1-15]\n"); | 1160 | hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); |
697 | for (i = 1; i < rar_entries; i++) { | 1161 | for (i = 1; i < rar_entries; i++) { |
698 | IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); | 1162 | IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); |
699 | IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); | 1163 | IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); |
@@ -708,6 +1172,9 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) | |||
708 | for (i = 0; i < hw->mac.mcft_size; i++) | 1172 | for (i = 0; i < hw->mac.mcft_size; i++) |
709 | IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); | 1173 | IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); |
710 | 1174 | ||
1175 | if (hw->mac.ops.init_uta_tables) | ||
1176 | hw->mac.ops.init_uta_tables(hw); | ||
1177 | |||
711 | return 0; | 1178 | return 0; |
712 | } | 1179 | } |
713 | 1180 | ||
@@ -718,7 +1185,7 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) | |||
718 | * | 1185 | * |
719 | * Adds it to unused receive address register or goes into promiscuous mode. | 1186 | * Adds it to unused receive address register or goes into promiscuous mode. |
720 | **/ | 1187 | **/ |
721 | void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr) | 1188 | static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) |
722 | { | 1189 | { |
723 | u32 rar_entries = hw->mac.num_rar_entries; | 1190 | u32 rar_entries = hw->mac.num_rar_entries; |
724 | u32 rar; | 1191 | u32 rar; |
@@ -733,7 +1200,7 @@ void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr) | |||
733 | if (hw->addr_ctrl.rar_used_count < rar_entries) { | 1200 | if (hw->addr_ctrl.rar_used_count < rar_entries) { |
734 | rar = hw->addr_ctrl.rar_used_count - | 1201 | rar = hw->addr_ctrl.rar_used_count - |
735 | hw->addr_ctrl.mc_addr_in_rar_count; | 1202 | hw->addr_ctrl.mc_addr_in_rar_count; |
736 | ixgbe_set_rar(hw, rar, addr, 0, IXGBE_RAH_AV); | 1203 | hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); |
737 | hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar); | 1204 | hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar); |
738 | hw->addr_ctrl.rar_used_count++; | 1205 | hw->addr_ctrl.rar_used_count++; |
739 | } else { | 1206 | } else { |
@@ -744,7 +1211,7 @@ void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr) | |||
744 | } | 1211 | } |
745 | 1212 | ||
746 | /** | 1213 | /** |
747 | * ixgbe_update_uc_addr_list - Updates MAC list of secondary addresses | 1214 | * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses |
748 | * @hw: pointer to hardware structure | 1215 | * @hw: pointer to hardware structure |
749 | * @addr_list: the list of new addresses | 1216 | * @addr_list: the list of new addresses |
750 | * @addr_count: number of addresses | 1217 | * @addr_count: number of addresses |
@@ -757,7 +1224,7 @@ void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr) | |||
757 | * Drivers using secondary unicast addresses must set user_set_promisc when | 1224 | * Drivers using secondary unicast addresses must set user_set_promisc when |
758 | * manually putting the device into promiscuous mode. | 1225 | * manually putting the device into promiscuous mode. |
759 | **/ | 1226 | **/ |
760 | s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, | 1227 | s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, |
761 | u32 addr_count, ixgbe_mc_addr_itr next) | 1228 | u32 addr_count, ixgbe_mc_addr_itr next) |
762 | { | 1229 | { |
763 | u8 *addr; | 1230 | u8 *addr; |
@@ -787,7 +1254,7 @@ s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, | |||
787 | for (i = 0; i < addr_count; i++) { | 1254 | for (i = 0; i < addr_count; i++) { |
788 | hw_dbg(hw, " Adding the secondary addresses:\n"); | 1255 | hw_dbg(hw, " Adding the secondary addresses:\n"); |
789 | addr = next(hw, &addr_list, &vmdq); | 1256 | addr = next(hw, &addr_list, &vmdq); |
790 | ixgbe_add_uc_addr(hw, addr); | 1257 | ixgbe_add_uc_addr(hw, addr, vmdq); |
791 | } | 1258 | } |
792 | 1259 | ||
793 | if (hw->addr_ctrl.overflow_promisc) { | 1260 | if (hw->addr_ctrl.overflow_promisc) { |
@@ -808,7 +1275,7 @@ s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, | |||
808 | } | 1275 | } |
809 | } | 1276 | } |
810 | 1277 | ||
811 | hw_dbg(hw, "ixgbe_update_uc_addr_list Complete\n"); | 1278 | hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n"); |
812 | return 0; | 1279 | return 0; |
813 | } | 1280 | } |
814 | 1281 | ||
@@ -821,7 +1288,7 @@ s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, | |||
821 | * bit-vector to set in the multicast table. The hardware uses 12 bits, from | 1288 | * bit-vector to set in the multicast table. The hardware uses 12 bits, from |
822 | * incoming rx multicast addresses, to determine the bit-vector to check in | 1289 | * incoming rx multicast addresses, to determine the bit-vector to check in |
823 | * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set | 1290 | * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set |
824 | * by the MO field of the MCSTCTRL. The MO field is set during initalization | 1291 | * by the MO field of the MCSTCTRL. The MO field is set during initialization |
825 | * to mc_filter_type. | 1292 | * to mc_filter_type. |
826 | **/ | 1293 | **/ |
827 | static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) | 1294 | static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) |
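As the comment notes, mc_filter_type selects which 12 bits of the multicast address form the MTA vector; the vector's upper seven bits then pick one of the 32-bit MTA registers and its lower five bits pick the bit inside that register. A sketch for the default filter type, assuming it slices the top of the address (bits [47:36]) as in the full ixgbe_mta_vector body; that slice is an assumption, not shown in this hunk:

        #include <stdint.h>
        #include <stdio.h>

        /* Sketch of the MTA lookup for an assumed mc_filter_type 0, where the
         * 12-bit vector comes from the top of the multicast address; other
         * filter types shift the window.  The vector selects one bit in the
         * array of mcft_size 32-bit MTA registers. */
        static void mta_position(const uint8_t mc_addr[6],
                                 uint32_t *reg_index, uint32_t *bit_index)
        {
                uint16_t vector;

                vector = (uint16_t)((mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4));
                vector &= 0xFFF;

                *reg_index = (vector >> 5) & 0x7F;  /* which 32-bit MTA register */
                *bit_index = vector & 0x1F;         /* which bit inside it */
        }

        int main(void)
        {
                const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
                uint32_t reg, bit;

                mta_position(mc, &reg, &bit);
                printf("MTA[%u] |= 1 << %u\n", (unsigned)reg, (unsigned)bit);
                return 0;
        }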
@@ -907,10 +1374,10 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) | |||
907 | * else put it in the MTA | 1374 | * else put it in the MTA |
908 | */ | 1375 | */ |
909 | if (hw->addr_ctrl.rar_used_count < rar_entries) { | 1376 | if (hw->addr_ctrl.rar_used_count < rar_entries) { |
1377 | /* use RAR from the end up for multicast */ | ||
910 | rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1; | 1378 | rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1; |
911 | ixgbe_set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV); | 1379 | hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV); |
912 | hw_dbg(hw, "Added a multicast address to RAR[%d]\n", | 1380 | hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar); |
913 | hw->addr_ctrl.rar_used_count); | ||
914 | hw->addr_ctrl.rar_used_count++; | 1381 | hw->addr_ctrl.rar_used_count++; |
915 | hw->addr_ctrl.mc_addr_in_rar_count++; | 1382 | hw->addr_ctrl.mc_addr_in_rar_count++; |
916 | } else { | 1383 | } else { |
@@ -921,18 +1388,18 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) | |||
921 | } | 1388 | } |
922 | 1389 | ||
923 | /** | 1390 | /** |
924 | * ixgbe_update_mc_addr_list - Updates MAC list of multicast addresses | 1391 | * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses |
925 | * @hw: pointer to hardware structure | 1392 | * @hw: pointer to hardware structure |
926 | * @mc_addr_list: the list of new multicast addresses | 1393 | * @mc_addr_list: the list of new multicast addresses |
927 | * @mc_addr_count: number of addresses | 1394 | * @mc_addr_count: number of addresses |
928 | * @next: iterator function to walk the multicast address list | 1395 | * @next: iterator function to walk the multicast address list |
929 | * | 1396 | * |
930 | * The given list replaces any existing list. Clears the MC addrs from receive | 1397 | * The given list replaces any existing list. Clears the MC addrs from receive |
931 | * address registers and the multicast table. Uses unsed receive address | 1398 | * address registers and the multicast table. Uses unused receive address |
932 | * registers for the first multicast addresses, and hashes the rest into the | 1399 | * registers for the first multicast addresses, and hashes the rest into the |
933 | * multicast table. | 1400 | * multicast table. |
934 | **/ | 1401 | **/ |
935 | s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, | 1402 | s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, |
936 | u32 mc_addr_count, ixgbe_mc_addr_itr next) | 1403 | u32 mc_addr_count, ixgbe_mc_addr_itr next) |
937 | { | 1404 | { |
938 | u32 i; | 1405 | u32 i; |
@@ -949,7 +1416,8 @@ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, | |||
949 | hw->addr_ctrl.mta_in_use = 0; | 1416 | hw->addr_ctrl.mta_in_use = 0; |
950 | 1417 | ||
951 | /* Zero out the other receive addresses. */ | 1418 | /* Zero out the other receive addresses. */ |
952 | hw_dbg(hw, "Clearing RAR[1-15]\n"); | 1419 | hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count, |
1420 | rar_entries - 1); | ||
953 | for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) { | 1421 | for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) { |
954 | IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); | 1422 | IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); |
955 | IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); | 1423 | IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); |
@@ -971,188 +1439,53 @@ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, | |||
971 | IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, | 1439 | IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, |
972 | IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); | 1440 | IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); |
973 | 1441 | ||
974 | hw_dbg(hw, "ixgbe_update_mc_addr_list Complete\n"); | 1442 | hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); |
975 | return 0; | 1443 | return 0; |
976 | } | 1444 | } |
977 | 1445 | ||
978 | /** | 1446 | /** |
979 | * ixgbe_clear_vfta - Clear VLAN filter table | 1447 | * ixgbe_enable_mc_generic - Enable multicast address in RAR |
980 | * @hw: pointer to hardware structure | 1448 | * @hw: pointer to hardware structure |
981 | * | 1449 | * |
982 | * Clears the VLAN filer table, and the VMDq index associated with the filter | 1450 | * Enables multicast address in RAR and the use of the multicast hash table. |
983 | **/ | 1451 | **/ |
984 | static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) | 1452 | s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) |
985 | { | 1453 | { |
986 | u32 offset; | 1454 | u32 i; |
987 | u32 vlanbyte; | 1455 | u32 rar_entries = hw->mac.num_rar_entries; |
1456 | struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; | ||
988 | 1457 | ||
989 | for (offset = 0; offset < hw->mac.vft_size; offset++) | 1458 | if (a->mc_addr_in_rar_count > 0) |
990 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); | 1459 | for (i = (rar_entries - a->mc_addr_in_rar_count); |
1460 | i < rar_entries; i++) | ||
1461 | ixgbe_enable_rar(hw, i); | ||
991 | 1462 | ||
992 | for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) | 1463 | if (a->mta_in_use > 0) |
993 | for (offset = 0; offset < hw->mac.vft_size; offset++) | 1464 | IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | |
994 | IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), | 1465 | hw->mac.mc_filter_type); |
995 | 0); | ||
996 | 1466 | ||
997 | return 0; | 1467 | return 0; |
998 | } | 1468 | } |
999 | 1469 | ||
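Multicast addresses are parked in RARs from the top of the table down, so the enable/disable helpers walk only the last mc_addr_in_rar_count entries. A tiny sketch of that index range with made-up sizes:

        #include <stdio.h>

        int main(void)
        {
                /* Example sizes only: a 16-entry RAR table whose last three
                 * entries hold multicast addresses, as tracked by
                 * mc_addr_in_rar_count in the code above. */
                unsigned int rar_entries = 16;
                unsigned int mc_addr_in_rar_count = 3;
                unsigned int i;

                for (i = rar_entries - mc_addr_in_rar_count; i < rar_entries; i++)
                        printf("enable RAR[%u]\n", i);   /* prints 13, 14, 15 */

                return 0;
        }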
1000 | /** | 1470 | /** |
1001 | * ixgbe_set_vfta - Set VLAN filter table | 1471 | * ixgbe_disable_mc_generic - Disable multicast address in RAR |
1002 | * @hw: pointer to hardware structure | 1472 | * @hw: pointer to hardware structure |
1003 | * @vlan: VLAN id to write to VLAN filter | ||
1004 | * @vind: VMDq output index that maps queue to VLAN id in VFTA | ||
1005 | * @vlan_on: boolean flag to turn on/off VLAN in VFTA | ||
1006 | * | 1473 | * |
1007 | * Turn on/off specified VLAN in the VLAN filter table. | 1474 | * Disables multicast address in RAR and the use of the multicast hash table. |
1008 | **/ | 1475 | **/ |
1009 | s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, | 1476 | s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) |
1010 | bool vlan_on) | ||
1011 | { | 1477 | { |
1012 | u32 VftaIndex; | 1478 | u32 i; |
1013 | u32 BitOffset; | 1479 | u32 rar_entries = hw->mac.num_rar_entries; |
1014 | u32 VftaReg; | 1480 | struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; |
1015 | u32 VftaByte; | ||
1016 | |||
1017 | /* Determine 32-bit word position in array */ | ||
1018 | VftaIndex = (vlan >> 5) & 0x7F; /* upper seven bits */ | ||
1019 | |||
1020 | /* Determine the location of the (VMD) queue index */ | ||
1021 | VftaByte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ | ||
1022 | BitOffset = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ | ||
1023 | |||
1024 | /* Set the nibble for VMD queue index */ | ||
1025 | VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex)); | ||
1026 | VftaReg &= (~(0x0F << BitOffset)); | ||
1027 | VftaReg |= (vind << BitOffset); | ||
1028 | IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg); | ||
1029 | |||
1030 | /* Determine the location of the bit for this VLAN id */ | ||
1031 | BitOffset = vlan & 0x1F; /* lower five bits */ | ||
1032 | |||
1033 | VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex)); | ||
1034 | if (vlan_on) | ||
1035 | /* Turn on this VLAN id */ | ||
1036 | VftaReg |= (1 << BitOffset); | ||
1037 | else | ||
1038 | /* Turn off this VLAN id */ | ||
1039 | VftaReg &= ~(1 << BitOffset); | ||
1040 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(VftaIndex), VftaReg); | ||
1041 | |||
1042 | return 0; | ||
1043 | } | ||
1044 | |||
1045 | /** | ||
1046 | * ixgbe_setup_fc - Configure flow control settings | ||
1047 | * @hw: pointer to hardware structure | ||
1048 | * @packetbuf_num: packet buffer number (0-7) | ||
1049 | * | ||
1050 | * Configures the flow control settings based on SW configuration. | ||
1051 | * This function is used for 802.3x flow control configuration only. | ||
1052 | **/ | ||
1053 | s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) | ||
1054 | { | ||
1055 | u32 frctl_reg; | ||
1056 | u32 rmcs_reg; | ||
1057 | |||
1058 | if (packetbuf_num < 0 || packetbuf_num > 7) | ||
1059 | hw_dbg(hw, "Invalid packet buffer number [%d], expected range " | ||
1060 | "is 0-7\n", packetbuf_num); | ||
1061 | |||
1062 | frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); | ||
1063 | frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); | ||
1064 | |||
1065 | rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); | ||
1066 | rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); | ||
1067 | |||
1068 | /* | ||
1069 | * 10 gig parts do not have a word in the EEPROM to determine the | ||
1070 | * default flow control setting, so we explicitly set it to full. | ||
1071 | */ | ||
1072 | if (hw->fc.type == ixgbe_fc_default) | ||
1073 | hw->fc.type = ixgbe_fc_full; | ||
1074 | |||
1075 | /* | ||
1076 | * We want to save off the original Flow Control configuration just in | ||
1077 | * case we get disconnected and then reconnected into a different hub | ||
1078 | * or switch with different Flow Control capabilities. | ||
1079 | */ | ||
1080 | hw->fc.type = hw->fc.original_type; | ||
1081 | |||
1082 | /* | ||
1083 | * The possible values of the "flow_control" parameter are: | ||
1084 | * 0: Flow control is completely disabled | ||
1085 | * 1: Rx flow control is enabled (we can receive pause frames but not | ||
1086 | * send pause frames). | ||
1087 | * 2: Tx flow control is enabled (we can send pause frames but we do not | ||
1088 | * support receiving pause frames) | ||
1089 | * 3: Both Rx and TX flow control (symmetric) are enabled. | ||
1090 | * other: Invalid. | ||
1091 | */ | ||
1092 | switch (hw->fc.type) { | ||
1093 | case ixgbe_fc_none: | ||
1094 | break; | ||
1095 | case ixgbe_fc_rx_pause: | ||
1096 | /* | ||
1097 | * RX Flow control is enabled, | ||
1098 | * and TX Flow control is disabled. | ||
1099 | */ | ||
1100 | frctl_reg |= IXGBE_FCTRL_RFCE; | ||
1101 | break; | ||
1102 | case ixgbe_fc_tx_pause: | ||
1103 | /* | ||
1104 | * TX Flow control is enabled, and RX Flow control is disabled, | ||
1105 | * by a software over-ride. | ||
1106 | */ | ||
1107 | rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; | ||
1108 | break; | ||
1109 | case ixgbe_fc_full: | ||
1110 | /* | ||
1111 | * Flow control (both RX and TX) is enabled by a software | ||
1112 | * over-ride. | ||
1113 | */ | ||
1114 | frctl_reg |= IXGBE_FCTRL_RFCE; | ||
1115 | rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; | ||
1116 | break; | ||
1117 | default: | ||
1118 | /* We should never get here. The value should be 0-3. */ | ||
1119 | hw_dbg(hw, "Flow control param set incorrectly\n"); | ||
1120 | break; | ||
1121 | } | ||
1122 | |||
1123 | /* Enable 802.3x based flow control settings. */ | ||
1124 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); | ||
1125 | IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); | ||
1126 | |||
1127 | /* | ||
1128 | * Check for invalid software configuration, zeros are completely | ||
1129 | * invalid for all parameters used past this point, and if we enable | ||
1130 | * flow control with zero water marks, we blast flow control packets. | ||
1131 | */ | ||
1132 | if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { | ||
1133 | hw_dbg(hw, "Flow control structure initialized incorrectly\n"); | ||
1134 | return IXGBE_ERR_INVALID_LINK_SETTINGS; | ||
1135 | } | ||
1136 | 1481 | ||
1137 | /* | 1482 | if (a->mc_addr_in_rar_count > 0) |
1138 | * We need to set up the Receive Threshold high and low water | 1483 | for (i = (rar_entries - a->mc_addr_in_rar_count); |
1139 | * marks as well as (optionally) enabling the transmission of | 1484 | i < rar_entries; i++) |
1140 | * XON frames. | 1485 | ixgbe_disable_rar(hw, i); |
1141 | */ | ||
1142 | if (hw->fc.type & ixgbe_fc_tx_pause) { | ||
1143 | if (hw->fc.send_xon) { | ||
1144 | IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), | ||
1145 | (hw->fc.low_water | IXGBE_FCRTL_XONE)); | ||
1146 | } else { | ||
1147 | IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), | ||
1148 | hw->fc.low_water); | ||
1149 | } | ||
1150 | IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), | ||
1151 | (hw->fc.high_water)|IXGBE_FCRTH_FCEN); | ||
1152 | } | ||
1153 | 1486 | ||
1154 | IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time); | 1487 | if (a->mta_in_use > 0) |
1155 | IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); | 1488 | IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); |
1156 | 1489 | ||
1157 | return 0; | 1490 | return 0; |
1158 | } | 1491 | } |
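For reference, the mode handling in the removed ixgbe_setup_fc reduces to a small mapping: Rx pause sets FCTRL.RFCE, Tx pause sets the 802.3x TFCE bit in RMCS, and full flow control sets both. A sketch of that mapping with illustrative bit values only (the real IXGBE_FCTRL_RFCE and IXGBE_RMCS_TFCE_802_3X definitions live in ixgbe_type.h):

        #include <stdint.h>
        #include <stdio.h>

        #define FCTRL_RFCE       (1u << 0)   /* illustrative values only */
        #define RMCS_TFCE_802_3X (1u << 1)

        enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

        /* Mirror of the switch removed above: RX pause enables receipt of
         * pause frames, TX pause enables sending them, full enables both. */
        static void fc_mode_to_bits(enum fc_mode mode, uint32_t *fctrl, uint32_t *rmcs)
        {
                *fctrl = 0;
                *rmcs = 0;

                switch (mode) {
                case FC_RX_PAUSE:
                        *fctrl |= FCTRL_RFCE;
                        break;
                case FC_TX_PAUSE:
                        *rmcs |= RMCS_TFCE_802_3X;
                        break;
                case FC_FULL:
                        *fctrl |= FCTRL_RFCE;
                        *rmcs |= RMCS_TFCE_802_3X;
                        break;
                case FC_NONE:
                default:
                        break;
                }
        }

        int main(void)
        {
                uint32_t fctrl, rmcs;

                fc_mode_to_bits(FC_FULL, &fctrl, &rmcs);
                printf("fctrl=0x%x rmcs=0x%x\n", (unsigned)fctrl, (unsigned)rmcs);
                return 0;
        }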
@@ -1168,13 +1501,24 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) | |||
1168 | **/ | 1501 | **/ |
1169 | s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) | 1502 | s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) |
1170 | { | 1503 | { |
1171 | u32 ctrl; | 1504 | u32 i; |
1172 | s32 i; | 1505 | u32 reg_val; |
1506 | u32 number_of_queues; | ||
1173 | s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; | 1507 | s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; |
1174 | 1508 | ||
1175 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); | 1509 | /* Disable the receive unit by stopping each queue */ |
1176 | ctrl |= IXGBE_CTRL_GIO_DIS; | 1510 | number_of_queues = hw->mac.max_rx_queues; |
1177 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); | 1511 | for (i = 0; i < number_of_queues; i++) { |
1512 | reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); | ||
1513 | if (reg_val & IXGBE_RXDCTL_ENABLE) { | ||
1514 | reg_val &= ~IXGBE_RXDCTL_ENABLE; | ||
1515 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); | ||
1516 | } | ||
1517 | } | ||
1518 | |||
1519 | reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL); | ||
1520 | reg_val |= IXGBE_CTRL_GIO_DIS; | ||
1521 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); | ||
1178 | 1522 | ||
1179 | for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { | 1523 | for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { |
1180 | if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { | 1524 | if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { |
@@ -1189,11 +1533,11 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) | |||
1189 | 1533 | ||
1190 | 1534 | ||
1191 | /** | 1535 | /** |
1192 | * ixgbe_acquire_swfw_sync - Aquire SWFW semaphore | 1536 | * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore |
1193 | * @hw: pointer to hardware structure | 1537 | * @hw: pointer to hardware structure |
1194 | * @mask: Mask to specify wich semaphore to acquire | 1538 | * @mask: Mask to specify which semaphore to acquire |
1195 | * | 1539 | * |
1196 | * Aquires the SWFW semaphore throught the GSSR register for the specified | 1540 | * Acquires the SWFW semaphore through the GSSR register for the specified |
1197 | * function (CSR, PHY0, PHY1, EEPROM, Flash) | 1541 | * function (CSR, PHY0, PHY1, EEPROM, Flash) |
1198 | **/ | 1542 | **/ |
1199 | s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) | 1543 | s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) |
@@ -1235,9 +1579,9 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) | |||
1235 | /** | 1579 | /** |
1236 | * ixgbe_release_swfw_sync - Release SWFW semaphore | 1580 | * ixgbe_release_swfw_sync - Release SWFW semaphore |
1237 | * @hw: pointer to hardware structure | 1581 | * @hw: pointer to hardware structure |
1238 | * @mask: Mask to specify wich semaphore to release | 1582 | * @mask: Mask to specify which semaphore to release |
1239 | * | 1583 | * |
1240 | * Releases the SWFW semaphore throught the GSSR register for the specified | 1584 | * Releases the SWFW semaphore through the GSSR register for the specified |
1241 | * function (CSR, PHY0, PHY1, EEPROM, Flash) | 1585 | * function (CSR, PHY0, PHY1, EEPROM, Flash) |
1242 | **/ | 1586 | **/ |
1243 | void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) | 1587 | void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) |
@@ -1254,45 +1598,3 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) | |||
1254 | ixgbe_release_eeprom_semaphore(hw); | 1598 | ixgbe_release_eeprom_semaphore(hw); |
1255 | } | 1599 | } |
1256 | 1600 | ||
1257 | /** | ||
1258 | * ixgbe_read_analog_reg8 - Reads 8 bit Atlas analog register | ||
1259 | * @hw: pointer to hardware structure | ||
1260 | * @reg: analog register to read | ||
1261 | * @val: read value | ||
1262 | * | ||
1263 | * Performs write operation to analog register specified. | ||
1264 | **/ | ||
1265 | s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) | ||
1266 | { | ||
1267 | u32 atlas_ctl; | ||
1268 | |||
1269 | IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, | ||
1270 | IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); | ||
1271 | IXGBE_WRITE_FLUSH(hw); | ||
1272 | udelay(10); | ||
1273 | atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); | ||
1274 | *val = (u8)atlas_ctl; | ||
1275 | |||
1276 | return 0; | ||
1277 | } | ||
1278 | |||
1279 | /** | ||
1280 | * ixgbe_write_analog_reg8 - Writes 8 bit Atlas analog register | ||
1281 | * @hw: pointer to hardware structure | ||
1282 | * @reg: atlas register to write | ||
1283 | * @val: value to write | ||
1284 | * | ||
1285 | * Performs write operation to Atlas analog register specified. | ||
1286 | **/ | ||
1287 | s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) | ||
1288 | { | ||
1289 | u32 atlas_ctl; | ||
1290 | |||
1291 | atlas_ctl = (reg << 8) | val; | ||
1292 | IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); | ||
1293 | IXGBE_WRITE_FLUSH(hw); | ||
1294 | udelay(10); | ||
1295 | |||
1296 | return 0; | ||
1297 | } | ||
1298 | |||
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index c75ecba9ccda..13ed8d2ff4a4 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h | |||
@@ -31,36 +31,45 @@ | |||
31 | 31 | ||
32 | #include "ixgbe_type.h" | 32 | #include "ixgbe_type.h" |
33 | 33 | ||
34 | s32 ixgbe_init_hw(struct ixgbe_hw *hw); | 34 | s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); |
35 | s32 ixgbe_start_hw(struct ixgbe_hw *hw); | 35 | s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); |
36 | s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); | 36 | s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); |
37 | s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); | 37 | s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); |
38 | s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num); | 38 | s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); |
39 | 39 | s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); | |
40 | s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); | 40 | s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); |
41 | s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); | 41 | s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); |
42 | 42 | ||
43 | s32 ixgbe_init_eeprom(struct ixgbe_hw *hw); | 43 | s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); |
44 | s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); | 44 | s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); |
45 | s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); | 45 | |
46 | 46 | s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); | |
47 | s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, | 47 | s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); |
48 | u32 enable_addr); | 48 | s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, |
49 | s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, | 49 | u16 *data); |
50 | u32 mc_addr_count, ixgbe_mc_addr_itr next); | 50 | s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, |
51 | s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *uc_addr_list, | 51 | u16 *checksum_val); |
52 | u32 mc_addr_count, ixgbe_mc_addr_itr next); | 52 | s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); |
53 | s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); | 53 | |
54 | s32 ixgbe_validate_mac_addr(u8 *mac_addr); | 54 | s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, |
55 | 55 | u32 enable_addr); | |
56 | s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packtetbuf_num); | 56 | s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); |
57 | s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); | ||
58 | s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, | ||
59 | u32 mc_addr_count, | ||
60 | ixgbe_mc_addr_itr func); | ||
61 | s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, | ||
62 | u32 addr_count, ixgbe_mc_addr_itr func); | ||
63 | s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); | ||
64 | s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); | ||
57 | 65 | ||
66 | s32 ixgbe_validate_mac_addr(u8 *mac_addr); | ||
58 | s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); | 67 | s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); |
59 | void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); | 68 | void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); |
60 | s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); | 69 | s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); |
61 | 70 | ||
62 | s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); | 71 | s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val); |
63 | s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); | 72 | s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val); |
64 | 73 | ||
65 | #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) | 74 | #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) |
66 | 75 | ||
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 4701abf3a59b..ff4fac34a171 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -128,9 +128,10 @@ static int ixgbe_get_settings(struct net_device *netdev, | |||
128 | ecmd->advertising = (ADVERTISED_10000baseT_Full | | 128 | ecmd->advertising = (ADVERTISED_10000baseT_Full | |
129 | ADVERTISED_FIBRE); | 129 | ADVERTISED_FIBRE); |
130 | ecmd->port = PORT_FIBRE; | 130 | ecmd->port = PORT_FIBRE; |
131 | ecmd->autoneg = AUTONEG_DISABLE; | ||
131 | } | 132 | } |
132 | 133 | ||
133 | adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up, false); | 134 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); |
134 | if (link_up) { | 135 | if (link_up) { |
135 | ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? | 136 | ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? |
136 | SPEED_10000 : SPEED_1000; | 137 | SPEED_10000 : SPEED_1000; |
@@ -327,7 +328,7 @@ static void ixgbe_get_regs(struct net_device *netdev, | |||
327 | regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); | 328 | regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); |
328 | regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); | 329 | regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); |
329 | regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); | 330 | regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); |
330 | regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL); | 331 | regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); |
331 | regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); | 332 | regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); |
332 | 333 | ||
333 | /* Flow Control */ | 334 | /* Flow Control */ |
@@ -373,7 +374,7 @@ static void ixgbe_get_regs(struct net_device *netdev, | |||
373 | regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); | 374 | regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); |
374 | for (i = 0; i < 16; i++) | 375 | for (i = 0; i < 16; i++) |
375 | regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); | 376 | regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); |
376 | regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE); | 377 | regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); |
377 | regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); | 378 | regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
378 | regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | 379 | regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); |
379 | regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); | 380 | regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); |
@@ -605,8 +606,8 @@ static int ixgbe_get_eeprom(struct net_device *netdev, | |||
605 | return -ENOMEM; | 606 | return -ENOMEM; |
606 | 607 | ||
607 | for (i = 0; i < eeprom_len; i++) { | 608 | for (i = 0; i < eeprom_len; i++) { |
608 | if ((ret_val = ixgbe_read_eeprom(hw, first_word + i, | 609 | if ((ret_val = hw->eeprom.ops.read(hw, first_word + i, |
609 | &eeprom_buff[i]))) | 610 | &eeprom_buff[i]))) |
610 | break; | 611 | break; |
611 | } | 612 | } |
612 | 613 | ||
@@ -807,7 +808,7 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, | |||
807 | u8 *data) | 808 | u8 *data) |
808 | { | 809 | { |
809 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 810 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
810 | u8 *p = data; | 811 | char *p = (char *)data; |
811 | int i; | 812 | int i; |
812 | 813 | ||
813 | switch (stringset) { | 814 | switch (stringset) { |
@@ -857,16 +858,17 @@ static int ixgbe_nway_reset(struct net_device *netdev) | |||
857 | static int ixgbe_phys_id(struct net_device *netdev, u32 data) | 858 | static int ixgbe_phys_id(struct net_device *netdev, u32 data) |
858 | { | 859 | { |
859 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 860 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
860 | u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL); | 861 | struct ixgbe_hw *hw = &adapter->hw; |
862 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | ||
861 | u32 i; | 863 | u32 i; |
862 | 864 | ||
863 | if (!data || data > 300) | 865 | if (!data || data > 300) |
864 | data = 300; | 866 | data = 300; |
865 | 867 | ||
866 | for (i = 0; i < (data * 1000); i += 400) { | 868 | for (i = 0; i < (data * 1000); i += 400) { |
867 | ixgbe_led_on(&adapter->hw, IXGBE_LED_ON); | 869 | hw->mac.ops.led_on(hw, IXGBE_LED_ON); |
868 | msleep_interruptible(200); | 870 | msleep_interruptible(200); |
869 | ixgbe_led_off(&adapter->hw, IXGBE_LED_ON); | 871 | hw->mac.ops.led_off(hw, IXGBE_LED_ON); |
870 | msleep_interruptible(200); | 872 | msleep_interruptible(200); |
871 | } | 873 | } |
872 | 874 | ||
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e18afa4e195f..df093ec830de 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -1749,14 +1749,16 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev, | |||
1749 | static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | 1749 | static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) |
1750 | { | 1750 | { |
1751 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 1751 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1752 | struct ixgbe_hw *hw = &adapter->hw; | ||
1752 | 1753 | ||
1753 | /* add VID to filter table */ | 1754 | /* add VID to filter table */ |
1754 | ixgbe_set_vfta(&adapter->hw, vid, 0, true); | 1755 | hw->mac.ops.set_vfta(hw, vid, 0, true); |
1755 | } | 1756 | } |
1756 | 1757 | ||
1757 | static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | 1758 | static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) |
1758 | { | 1759 | { |
1759 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 1760 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1761 | struct ixgbe_hw *hw = &adapter->hw; | ||
1760 | 1762 | ||
1761 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 1763 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
1762 | ixgbe_irq_disable(adapter); | 1764 | ixgbe_irq_disable(adapter); |
@@ -1767,7 +1769,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
1767 | ixgbe_irq_enable(adapter); | 1769 | ixgbe_irq_enable(adapter); |
1768 | 1770 | ||
1769 | /* remove VID from filter table */ | 1771 | /* remove VID from filter table */ |
1770 | ixgbe_set_vfta(&adapter->hw, vid, 0, false); | 1772 | hw->mac.ops.set_vfta(hw, vid, 0, false); |
1771 | } | 1773 | } |
1772 | 1774 | ||
1773 | static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) | 1775 | static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) |
@@ -1843,15 +1845,15 @@ static void ixgbe_set_rx_mode(struct net_device *netdev) | |||
1843 | addr_count = netdev->uc_count; | 1845 | addr_count = netdev->uc_count; |
1844 | if (addr_count) | 1846 | if (addr_count) |
1845 | addr_list = netdev->uc_list->dmi_addr; | 1847 | addr_list = netdev->uc_list->dmi_addr; |
1846 | ixgbe_update_uc_addr_list(hw, addr_list, addr_count, | 1848 | hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count, |
1847 | ixgbe_addr_list_itr); | 1849 | ixgbe_addr_list_itr); |
1848 | 1850 | ||
1849 | /* reprogram multicast list */ | 1851 | /* reprogram multicast list */ |
1850 | addr_count = netdev->mc_count; | 1852 | addr_count = netdev->mc_count; |
1851 | if (addr_count) | 1853 | if (addr_count) |
1852 | addr_list = netdev->mc_list->dmi_addr; | 1854 | addr_list = netdev->mc_list->dmi_addr; |
1853 | ixgbe_update_mc_addr_list(hw, addr_list, addr_count, | 1855 | hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, |
1854 | ixgbe_addr_list_itr); | 1856 | ixgbe_addr_list_itr); |
1855 | } | 1857 | } |
1856 | 1858 | ||
1857 | static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) | 1859 | static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) |
@@ -2016,11 +2018,12 @@ int ixgbe_up(struct ixgbe_adapter *adapter) | |||
2016 | 2018 | ||
2017 | void ixgbe_reset(struct ixgbe_adapter *adapter) | 2019 | void ixgbe_reset(struct ixgbe_adapter *adapter) |
2018 | { | 2020 | { |
2019 | if (ixgbe_init_hw(&adapter->hw)) | 2021 | struct ixgbe_hw *hw = &adapter->hw; |
2020 | DPRINTK(PROBE, ERR, "Hardware Error\n"); | 2022 | if (hw->mac.ops.init_hw(hw)) |
2023 | dev_err(&adapter->pdev->dev, "Hardware Error\n"); | ||
2021 | 2024 | ||
2022 | /* reprogram the RAR[0] in case user changed it. */ | 2025 | /* reprogram the RAR[0] in case user changed it. */ |
2023 | ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); | 2026 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); |
2024 | 2027 | ||
2025 | } | 2028 | } |
2026 | 2029 | ||
@@ -2637,6 +2640,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
2637 | struct pci_dev *pdev = adapter->pdev; | 2640 | struct pci_dev *pdev = adapter->pdev; |
2638 | unsigned int rss; | 2641 | unsigned int rss; |
2639 | 2642 | ||
2643 | /* PCI config space info */ | ||
2644 | |||
2645 | hw->vendor_id = pdev->vendor; | ||
2646 | hw->device_id = pdev->device; | ||
2647 | hw->revision_id = pdev->revision; | ||
2648 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | ||
2649 | hw->subsystem_device_id = pdev->subsystem_device; | ||
2650 | |||
2640 | /* Set capability flags */ | 2651 | /* Set capability flags */ |
2641 | rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); | 2652 | rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); |
2642 | adapter->ring_feature[RING_F_RSS].indices = rss; | 2653 | adapter->ring_feature[RING_F_RSS].indices = rss; |
@@ -2652,15 +2663,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
2652 | 2663 | ||
2653 | /* select 10G link by default */ | 2664 | /* select 10G link by default */ |
2654 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; | 2665 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; |
2655 | if (hw->mac.ops.reset(hw)) { | ||
2656 | dev_err(&pdev->dev, "HW Init failed\n"); | ||
2657 | return -EIO; | ||
2658 | } | ||
2659 | if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true, | ||
2660 | false)) { | ||
2661 | dev_err(&pdev->dev, "Link Speed setup failed\n"); | ||
2662 | return -EIO; | ||
2663 | } | ||
2664 | 2666 | ||
2665 | /* enable itr by default in dynamic mode */ | 2667 | /* enable itr by default in dynamic mode */ |
2666 | adapter->itr_setting = 1; | 2668 | adapter->itr_setting = 1; |
@@ -2675,7 +2677,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
2675 | adapter->rx_ring_count = IXGBE_DEFAULT_RXD; | 2677 | adapter->rx_ring_count = IXGBE_DEFAULT_RXD; |
2676 | 2678 | ||
2677 | /* initialize eeprom parameters */ | 2679 | /* initialize eeprom parameters */ |
2678 | if (ixgbe_init_eeprom(hw)) { | 2680 | if (ixgbe_init_eeprom_params_generic(hw)) { |
2679 | dev_err(&pdev->dev, "EEPROM initialization failed\n"); | 2681 | dev_err(&pdev->dev, "EEPROM initialization failed\n"); |
2680 | return -EIO; | 2682 | return -EIO; |
2681 | } | 2683 | } |
@@ -3622,7 +3624,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) | |||
3622 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 3624 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
3623 | memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); | 3625 | memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); |
3624 | 3626 | ||
3625 | ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); | 3627 | adapter->hw.mac.ops.set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); |
3626 | 3628 | ||
3627 | return 0; | 3629 | return 0; |
3628 | } | 3630 | } |
@@ -3646,6 +3648,22 @@ static void ixgbe_netpoll(struct net_device *netdev) | |||
3646 | #endif | 3648 | #endif |
3647 | 3649 | ||
3648 | /** | 3650 | /** |
3651 | * ixgbe_link_config - set up initial link with default speed and duplex | ||
3652 | * @hw: pointer to private hardware struct | ||
3653 | * | ||
3654 | * Returns 0 on success, negative on failure | ||
3655 | **/ | ||
3656 | static int ixgbe_link_config(struct ixgbe_hw *hw) | ||
3657 | { | ||
3658 | u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL; | ||
3659 | |||
3660 | /* must always autoneg for both 1G and 10G link */ | ||
3661 | hw->mac.autoneg = true; | ||
3662 | |||
3663 | return hw->mac.ops.setup_link_speed(hw, autoneg, true, true); | ||
3664 | } | ||
3665 | |||
3666 | /** | ||
3649 | * ixgbe_napi_add_all - prep napi structs for use | 3667 | * ixgbe_napi_add_all - prep napi structs for use |
3650 | * @adapter: private struct | 3668 | * @adapter: private struct |
3651 | * helper function to napi_add each possible q_vector->napi | 3669 | * helper function to napi_add each possible q_vector->napi |
@@ -3691,7 +3709,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3691 | static int cards_found; | 3709 | static int cards_found; |
3692 | int i, err, pci_using_dac; | 3710 | int i, err, pci_using_dac; |
3693 | u16 link_status, link_speed, link_width; | 3711 | u16 link_status, link_speed, link_width; |
3694 | u32 part_num; | 3712 | u32 part_num, eec; |
3695 | 3713 | ||
3696 | err = pci_enable_device(pdev); | 3714 | err = pci_enable_device(pdev); |
3697 | if (err) | 3715 | if (err) |
@@ -3705,8 +3723,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3705 | if (err) { | 3723 | if (err) { |
3706 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | 3724 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); |
3707 | if (err) { | 3725 | if (err) { |
3708 | dev_err(&pdev->dev, "No usable DMA " | 3726 | dev_err(&pdev->dev, "No usable DMA configuration, " |
3709 | "configuration, aborting\n"); | 3727 | "aborting\n"); |
3710 | goto err_dma; | 3728 | goto err_dma; |
3711 | } | 3729 | } |
3712 | } | 3730 | } |
@@ -3772,17 +3790,21 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3772 | 3790 | ||
3773 | adapter->bd_number = cards_found; | 3791 | adapter->bd_number = cards_found; |
3774 | 3792 | ||
3775 | /* PCI config space info */ | ||
3776 | hw->vendor_id = pdev->vendor; | ||
3777 | hw->device_id = pdev->device; | ||
3778 | hw->revision_id = pdev->revision; | ||
3779 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | ||
3780 | hw->subsystem_device_id = pdev->subsystem_device; | ||
3781 | |||
3782 | /* Setup hw api */ | 3793 | /* Setup hw api */ |
3783 | memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); | 3794 | memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); |
3784 | hw->mac.type = ii->mac; | 3795 | hw->mac.type = ii->mac; |
3785 | 3796 | ||
3797 | /* EEPROM */ | ||
3798 | memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); | ||
3799 | eec = IXGBE_READ_REG(hw, IXGBE_EEC); | ||
3800 | /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ | ||
3801 | if (!(eec & (1 << 8))) | ||
3802 | hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; | ||
3803 | |||
3804 | /* PHY */ | ||
3805 | memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); | ||
3806 | /* phy->sfp_type = ixgbe_sfp_type_unknown; */ | ||
3807 | |||
3786 | err = ii->get_invariants(hw); | 3808 | err = ii->get_invariants(hw); |
3787 | if (err) | 3809 | if (err) |
3788 | goto err_hw_init; | 3810 | goto err_hw_init; |
@@ -3792,6 +3814,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3792 | if (err) | 3814 | if (err) |
3793 | goto err_sw_init; | 3815 | goto err_sw_init; |
3794 | 3816 | ||
3817 | /* reset_hw fills in the perm_addr as well */ | ||
3818 | err = hw->mac.ops.reset_hw(hw); | ||
3819 | if (err) { | ||
3820 | dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); | ||
3821 | goto err_sw_init; | ||
3822 | } | ||
3823 | |||
3795 | netdev->features = NETIF_F_SG | | 3824 | netdev->features = NETIF_F_SG | |
3796 | NETIF_F_IP_CSUM | | 3825 | NETIF_F_IP_CSUM | |
3797 | NETIF_F_HW_VLAN_TX | | 3826 | NETIF_F_HW_VLAN_TX | |
@@ -3812,7 +3841,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3812 | netdev->features |= NETIF_F_HIGHDMA; | 3841 | netdev->features |= NETIF_F_HIGHDMA; |
3813 | 3842 | ||
3814 | /* make sure the EEPROM is good */ | 3843 | /* make sure the EEPROM is good */ |
3815 | if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { | 3844 | if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { |
3816 | dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); | 3845 | dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); |
3817 | err = -EIO; | 3846 | err = -EIO; |
3818 | goto err_eeprom; | 3847 | goto err_eeprom; |
@@ -3821,7 +3850,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3821 | memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); | 3850 | memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); |
3822 | memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); | 3851 | memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); |
3823 | 3852 | ||
3824 | if (ixgbe_validate_mac_addr(netdev->dev_addr)) { | 3853 | if (ixgbe_validate_mac_addr(netdev->perm_addr)) { |
3854 | dev_err(&pdev->dev, "invalid MAC address\n"); | ||
3825 | err = -EIO; | 3855 | err = -EIO; |
3826 | goto err_eeprom; | 3856 | goto err_eeprom; |
3827 | } | 3857 | } |
@@ -3853,7 +3883,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3853 | "Unknown"), | 3883 | "Unknown"), |
3854 | netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], | 3884 | netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], |
3855 | netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); | 3885 | netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); |
3856 | ixgbe_read_part_num(hw, &part_num); | 3886 | ixgbe_read_pba_num_generic(hw, &part_num); |
3857 | dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", | 3887 | dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", |
3858 | hw->mac.type, hw->phy.type, | 3888 | hw->mac.type, hw->phy.type, |
3859 | (part_num >> 8), (part_num & 0xff)); | 3889 | (part_num >> 8), (part_num & 0xff)); |
@@ -3867,7 +3897,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3867 | } | 3897 | } |
3868 | 3898 | ||
3869 | /* reset the hardware with the new settings */ | 3899 | /* reset the hardware with the new settings */ |
3870 | ixgbe_start_hw(hw); | 3900 | hw->mac.ops.start_hw(hw); |
3901 | |||
3902 | /* link_config depends on start_hw being called at least once */ | ||
3903 | err = ixgbe_link_config(hw); | ||
3904 | if (err) { | ||
3905 | dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err); | ||
3906 | goto err_register; | ||
3907 | } | ||
3871 | 3908 | ||
3872 | netif_carrier_off(netdev); | 3909 | netif_carrier_off(netdev); |
3873 | netif_tx_stop_all_queues(netdev); | 3910 | netif_tx_stop_all_queues(netdev); |
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 8002931ae823..63a701762413 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c | |||
@@ -33,32 +33,36 @@ | |||
33 | #include "ixgbe_common.h" | 33 | #include "ixgbe_common.h" |
34 | #include "ixgbe_phy.h" | 34 | #include "ixgbe_phy.h" |
35 | 35 | ||
36 | static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); | ||
36 | static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); | 37 | static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); |
37 | static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); | 38 | static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); |
38 | static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); | ||
39 | static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | ||
40 | u32 device_type, u16 phy_data); | ||
41 | 39 | ||
42 | /** | 40 | /** |
43 | * ixgbe_identify_phy - Get physical layer module | 41 | * ixgbe_identify_phy_generic - Get physical layer module |
44 | * @hw: pointer to hardware structure | 42 | * @hw: pointer to hardware structure |
45 | * | 43 | * |
46 | * Determines the physical layer module found on the current adapter. | 44 | * Determines the physical layer module found on the current adapter. |
47 | **/ | 45 | **/ |
48 | s32 ixgbe_identify_phy(struct ixgbe_hw *hw) | 46 | s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) |
49 | { | 47 | { |
50 | s32 status = IXGBE_ERR_PHY_ADDR_INVALID; | 48 | s32 status = IXGBE_ERR_PHY_ADDR_INVALID; |
51 | u32 phy_addr; | 49 | u32 phy_addr; |
52 | 50 | ||
53 | for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { | 51 | if (hw->phy.type == ixgbe_phy_unknown) { |
54 | if (ixgbe_validate_phy_addr(hw, phy_addr)) { | 52 | for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { |
55 | hw->phy.addr = phy_addr; | 53 | if (ixgbe_validate_phy_addr(hw, phy_addr)) { |
56 | ixgbe_get_phy_id(hw); | 54 | hw->phy.addr = phy_addr; |
57 | hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); | 55 | ixgbe_get_phy_id(hw); |
58 | status = 0; | 56 | hw->phy.type = |
59 | break; | 57 | ixgbe_get_phy_type_from_id(hw->phy.id); |
58 | status = 0; | ||
59 | break; | ||
60 | } | ||
60 | } | 61 | } |
62 | } else { | ||
63 | status = 0; | ||
61 | } | 64 | } |
65 | |||
62 | return status; | 66 | return status; |
63 | } | 67 | } |
64 | 68 | ||
@@ -73,10 +77,8 @@ static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) | |||
73 | bool valid = false; | 77 | bool valid = false; |
74 | 78 | ||
75 | hw->phy.addr = phy_addr; | 79 | hw->phy.addr = phy_addr; |
76 | ixgbe_read_phy_reg(hw, | 80 | hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, |
77 | IXGBE_MDIO_PHY_ID_HIGH, | 81 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); |
78 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, | ||
79 | &phy_id); | ||
80 | 82 | ||
81 | if (phy_id != 0xFFFF && phy_id != 0x0) | 83 | if (phy_id != 0xFFFF && phy_id != 0x0) |
82 | valid = true; | 84 | valid = true; |
@@ -95,21 +97,18 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) | |||
95 | u16 phy_id_high = 0; | 97 | u16 phy_id_high = 0; |
96 | u16 phy_id_low = 0; | 98 | u16 phy_id_low = 0; |
97 | 99 | ||
98 | status = ixgbe_read_phy_reg(hw, | 100 | status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, |
99 | IXGBE_MDIO_PHY_ID_HIGH, | 101 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, |
100 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, | 102 | &phy_id_high); |
101 | &phy_id_high); | ||
102 | 103 | ||
103 | if (status == 0) { | 104 | if (status == 0) { |
104 | hw->phy.id = (u32)(phy_id_high << 16); | 105 | hw->phy.id = (u32)(phy_id_high << 16); |
105 | status = ixgbe_read_phy_reg(hw, | 106 | status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, |
106 | IXGBE_MDIO_PHY_ID_LOW, | 107 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, |
107 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, | 108 | &phy_id_low); |
108 | &phy_id_low); | ||
109 | hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); | 109 | hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); |
110 | hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); | 110 | hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); |
111 | } | 111 | } |
112 | |||
113 | return status; | 112 | return status; |
114 | } | 113 | } |
115 | 114 | ||
@@ -123,9 +122,6 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) | |||
123 | enum ixgbe_phy_type phy_type; | 122 | enum ixgbe_phy_type phy_type; |
124 | 123 | ||
125 | switch (phy_id) { | 124 | switch (phy_id) { |
126 | case TN1010_PHY_ID: | ||
127 | phy_type = ixgbe_phy_tn; | ||
128 | break; | ||
129 | case QT2022_PHY_ID: | 125 | case QT2022_PHY_ID: |
130 | phy_type = ixgbe_phy_qt; | 126 | phy_type = ixgbe_phy_qt; |
131 | break; | 127 | break; |
@@ -138,32 +134,31 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) | |||
138 | } | 134 | } |
139 | 135 | ||
140 | /** | 136 | /** |
141 | * ixgbe_reset_phy - Performs a PHY reset | 137 | * ixgbe_reset_phy_generic - Performs a PHY reset |
142 | * @hw: pointer to hardware structure | 138 | * @hw: pointer to hardware structure |
143 | **/ | 139 | **/ |
144 | s32 ixgbe_reset_phy(struct ixgbe_hw *hw) | 140 | s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) |
145 | { | 141 | { |
146 | /* | 142 | /* |
147 | * Perform soft PHY reset to the PHY_XS. | 143 | * Perform soft PHY reset to the PHY_XS. |
148 | * This will cause a soft reset to the PHY | 144 | * This will cause a soft reset to the PHY |
149 | */ | 145 | */ |
150 | return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, | 146 | return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, |
151 | IXGBE_MDIO_PHY_XS_DEV_TYPE, | 147 | IXGBE_MDIO_PHY_XS_DEV_TYPE, |
152 | IXGBE_MDIO_PHY_XS_RESET); | 148 | IXGBE_MDIO_PHY_XS_RESET); |
153 | } | 149 | } |
154 | 150 | ||
155 | /** | 151 | /** |
156 | * ixgbe_read_phy_reg - Reads a value from a specified PHY register | 152 | * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register |
157 | * @hw: pointer to hardware structure | 153 | * @hw: pointer to hardware structure |
158 | * @reg_addr: 32 bit address of PHY register to read | 154 | * @reg_addr: 32 bit address of PHY register to read |
159 | * @phy_data: Pointer to read data from PHY register | 155 | * @phy_data: Pointer to read data from PHY register |
160 | **/ | 156 | **/ |
161 | s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | 157 | s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, |
162 | u32 device_type, u16 *phy_data) | 158 | u32 device_type, u16 *phy_data) |
163 | { | 159 | { |
164 | u32 command; | 160 | u32 command; |
165 | u32 i; | 161 | u32 i; |
166 | u32 timeout = 10; | ||
167 | u32 data; | 162 | u32 data; |
168 | s32 status = 0; | 163 | s32 status = 0; |
169 | u16 gssr; | 164 | u16 gssr; |
@@ -179,9 +174,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | |||
179 | if (status == 0) { | 174 | if (status == 0) { |
180 | /* Setup and write the address cycle command */ | 175 | /* Setup and write the address cycle command */ |
181 | command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | | 176 | command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | |
182 | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | | 177 | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | |
183 | (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | | 178 | (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | |
184 | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); | 179 | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); |
185 | 180 | ||
186 | IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); | 181 | IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); |
187 | 182 | ||
@@ -190,7 +185,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | |||
190 | * The MDI Command bit will clear when the operation is | 185 | * The MDI Command bit will clear when the operation is |
191 | * complete | 186 | * complete |
192 | */ | 187 | */ |
193 | for (i = 0; i < timeout; i++) { | 188 | for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { |
194 | udelay(10); | 189 | udelay(10); |
195 | 190 | ||
196 | command = IXGBE_READ_REG(hw, IXGBE_MSCA); | 191 | command = IXGBE_READ_REG(hw, IXGBE_MSCA); |
@@ -210,9 +205,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | |||
210 | * command | 205 | * command |
211 | */ | 206 | */ |
212 | command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | | 207 | command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | |
213 | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | | 208 | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | |
214 | (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | | 209 | (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | |
215 | (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); | 210 | (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); |
216 | 211 | ||
217 | IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); | 212 | IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); |
218 | 213 | ||
@@ -221,7 +216,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | |||
221 | * completed. The MDI Command bit will clear when the | 216 | * completed. The MDI Command bit will clear when the |
222 | * operation is complete | 217 | * operation is complete |
223 | */ | 218 | */ |
224 | for (i = 0; i < timeout; i++) { | 219 | for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { |
225 | udelay(10); | 220 | udelay(10); |
226 | 221 | ||
227 | command = IXGBE_READ_REG(hw, IXGBE_MSCA); | 222 | command = IXGBE_READ_REG(hw, IXGBE_MSCA); |
@@ -231,8 +226,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | |||
231 | } | 226 | } |
232 | 227 | ||
233 | if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { | 228 | if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { |
234 | hw_dbg(hw, | 229 | hw_dbg(hw, "PHY read command didn't complete\n"); |
235 | "PHY read command didn't complete\n"); | ||
236 | status = IXGBE_ERR_PHY; | 230 | status = IXGBE_ERR_PHY; |
237 | } else { | 231 | } else { |
238 | /* | 232 | /* |
@@ -247,22 +241,22 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | |||
247 | 241 | ||
248 | ixgbe_release_swfw_sync(hw, gssr); | 242 | ixgbe_release_swfw_sync(hw, gssr); |
249 | } | 243 | } |
244 | |||
250 | return status; | 245 | return status; |
251 | } | 246 | } |
252 | 247 | ||
253 | /** | 248 | /** |
254 | * ixgbe_write_phy_reg - Writes a value to specified PHY register | 249 | * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register |
255 | * @hw: pointer to hardware structure | 250 | * @hw: pointer to hardware structure |
256 | * @reg_addr: 32 bit PHY register to write | 251 | * @reg_addr: 32 bit PHY register to write |
257 | * @device_type: 5 bit device type | 252 | * @device_type: 5 bit device type |
258 | * @phy_data: Data to write to the PHY register | 253 | * @phy_data: Data to write to the PHY register |
259 | **/ | 254 | **/ |
260 | static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | 255 | s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, |
261 | u32 device_type, u16 phy_data) | 256 | u32 device_type, u16 phy_data) |
262 | { | 257 | { |
263 | u32 command; | 258 | u32 command; |
264 | u32 i; | 259 | u32 i; |
265 | u32 timeout = 10; | ||
266 | s32 status = 0; | 260 | s32 status = 0; |
267 | u16 gssr; | 261 | u16 gssr; |
268 | 262 | ||
@@ -280,9 +274,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | |||
280 | 274 | ||
281 | /* Setup and write the address cycle command */ | 275 | /* Setup and write the address cycle command */ |
282 | command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | | 276 | command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | |
283 | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | | 277 | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | |
284 | (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | | 278 | (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | |
285 | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); | 279 | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); |
286 | 280 | ||
287 | IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); | 281 | IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); |
288 | 282 | ||
@@ -291,19 +285,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | |||
291 | * The MDI Command bit will clear when the operation is | 285 | * The MDI Command bit will clear when the operation is |
292 | * complete | 286 | * complete |
293 | */ | 287 | */ |
294 | for (i = 0; i < timeout; i++) { | 288 | for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { |
295 | udelay(10); | 289 | udelay(10); |
296 | 290 | ||
297 | command = IXGBE_READ_REG(hw, IXGBE_MSCA); | 291 | command = IXGBE_READ_REG(hw, IXGBE_MSCA); |
298 | 292 | ||
299 | if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { | 293 | if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) |
300 | hw_dbg(hw, "PHY address cmd didn't complete\n"); | ||
301 | break; | 294 | break; |
302 | } | ||
303 | } | 295 | } |
304 | 296 | ||
305 | if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) | 297 | if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { |
298 | hw_dbg(hw, "PHY address cmd didn't complete\n"); | ||
306 | status = IXGBE_ERR_PHY; | 299 | status = IXGBE_ERR_PHY; |
300 | } | ||
307 | 301 | ||
308 | if (status == 0) { | 302 | if (status == 0) { |
309 | /* | 303 | /* |
@@ -311,9 +305,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | |||
311 | * command | 305 | * command |
312 | */ | 306 | */ |
313 | command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | | 307 | command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | |
314 | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | | 308 | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | |
315 | (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | | 309 | (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | |
316 | (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); | 310 | (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); |
317 | 311 | ||
318 | IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); | 312 | IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); |
319 | 313 | ||
@@ -322,20 +316,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | |||
322 | * completed. The MDI Command bit will clear when the | 316 | * completed. The MDI Command bit will clear when the |
323 | * operation is complete | 317 | * operation is complete |
324 | */ | 318 | */ |
325 | for (i = 0; i < timeout; i++) { | 319 | for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { |
326 | udelay(10); | 320 | udelay(10); |
327 | 321 | ||
328 | command = IXGBE_READ_REG(hw, IXGBE_MSCA); | 322 | command = IXGBE_READ_REG(hw, IXGBE_MSCA); |
329 | 323 | ||
330 | if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { | 324 | if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) |
331 | hw_dbg(hw, "PHY write command did not " | ||
332 | "complete.\n"); | ||
333 | break; | 325 | break; |
334 | } | ||
335 | } | 326 | } |
336 | 327 | ||
337 | if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) | 328 | if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { |
329 | hw_dbg(hw, "PHY address cmd didn't complete\n"); | ||
338 | status = IXGBE_ERR_PHY; | 330 | status = IXGBE_ERR_PHY; |
331 | } | ||
339 | } | 332 | } |
340 | 333 | ||
341 | ixgbe_release_swfw_sync(hw, gssr); | 334 | ixgbe_release_swfw_sync(hw, gssr); |
@@ -345,67 +338,54 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | |||
345 | } | 338 | } |
346 | 339 | ||
347 | /** | 340 | /** |
348 | * ixgbe_setup_tnx_phy_link - Set and restart autoneg | 341 | * ixgbe_setup_phy_link_generic - Set and restart autoneg |
349 | * @hw: pointer to hardware structure | 342 | * @hw: pointer to hardware structure |
350 | * | 343 | * |
351 | * Restarts autonegotiation on the PHY and waits for completion. | 344 | * Restarts autonegotiation on the PHY and waits for completion. |
352 | **/ | 345 | **/ |
353 | s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw) | 346 | s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) |
354 | { | 347 | { |
355 | s32 status = IXGBE_NOT_IMPLEMENTED; | 348 | s32 status = IXGBE_NOT_IMPLEMENTED; |
356 | u32 time_out; | 349 | u32 time_out; |
357 | u32 max_time_out = 10; | 350 | u32 max_time_out = 10; |
358 | u16 autoneg_speed_selection_register = 0x10; | 351 | u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; |
359 | u16 autoneg_restart_mask = 0x0200; | ||
360 | u16 autoneg_complete_mask = 0x0020; | ||
361 | u16 autoneg_reg = 0; | ||
362 | 352 | ||
363 | /* | 353 | /* |
364 | * Set advertisement settings in PHY based on autoneg_advertised | 354 | * Set advertisement settings in PHY based on autoneg_advertised |
365 | * settings. If autoneg_advertised = 0, then advertise default values | 355 | * settings. If autoneg_advertised = 0, then advertise default values |
366 | * txn devices cannot be "forced" to a autoneg 10G and fail. But can | 356 | * tnx devices cannot be "forced" to a autoneg 10G and fail. But can |
367 | * for a 1G. | 357 | * for a 1G. |
368 | */ | 358 | */ |
369 | ixgbe_read_phy_reg(hw, | 359 | hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG, |
370 | autoneg_speed_selection_register, | 360 | IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); |
371 | IXGBE_MDIO_AUTO_NEG_DEV_TYPE, | ||
372 | &autoneg_reg); | ||
373 | 361 | ||
374 | if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL) | 362 | if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL) |
375 | autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */ | 363 | autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */ |
376 | else | 364 | else |
377 | autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */ | 365 | autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */ |
378 | 366 | ||
379 | ixgbe_write_phy_reg(hw, | 367 | hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG, |
380 | autoneg_speed_selection_register, | 368 | IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); |
381 | IXGBE_MDIO_AUTO_NEG_DEV_TYPE, | ||
382 | autoneg_reg); | ||
383 | |||
384 | 369 | ||
385 | /* Restart PHY autonegotiation and wait for completion */ | 370 | /* Restart PHY autonegotiation and wait for completion */ |
386 | ixgbe_read_phy_reg(hw, | 371 | hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, |
387 | IXGBE_MDIO_AUTO_NEG_CONTROL, | 372 | IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); |
388 | IXGBE_MDIO_AUTO_NEG_DEV_TYPE, | ||
389 | &autoneg_reg); | ||
390 | 373 | ||
391 | autoneg_reg |= autoneg_restart_mask; | 374 | autoneg_reg |= IXGBE_MII_RESTART; |
392 | 375 | ||
393 | ixgbe_write_phy_reg(hw, | 376 | hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, |
394 | IXGBE_MDIO_AUTO_NEG_CONTROL, | 377 | IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); |
395 | IXGBE_MDIO_AUTO_NEG_DEV_TYPE, | ||
396 | autoneg_reg); | ||
397 | 378 | ||
398 | /* Wait for autonegotiation to finish */ | 379 | /* Wait for autonegotiation to finish */ |
399 | for (time_out = 0; time_out < max_time_out; time_out++) { | 380 | for (time_out = 0; time_out < max_time_out; time_out++) { |
400 | udelay(10); | 381 | udelay(10); |
401 | /* Restart PHY autonegotiation and wait for completion */ | 382 | /* Restart PHY autonegotiation and wait for completion */ |
402 | status = ixgbe_read_phy_reg(hw, | 383 | status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, |
403 | IXGBE_MDIO_AUTO_NEG_STATUS, | 384 | IXGBE_MDIO_AUTO_NEG_DEV_TYPE, |
404 | IXGBE_MDIO_AUTO_NEG_DEV_TYPE, | 385 | &autoneg_reg); |
405 | &autoneg_reg); | ||
406 | 386 | ||
407 | autoneg_reg &= autoneg_complete_mask; | 387 | autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE; |
408 | if (autoneg_reg == autoneg_complete_mask) { | 388 | if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) { |
409 | status = 0; | 389 | status = 0; |
410 | break; | 390 | break; |
411 | } | 391 | } |
@@ -418,64 +398,17 @@ s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw) | |||
418 | } | 398 | } |
419 | 399 | ||
420 | /** | 400 | /** |
421 | * ixgbe_check_tnx_phy_link - Determine link and speed status | 401 | * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities |
422 | * @hw: pointer to hardware structure | ||
423 | * | ||
424 | * Reads the VS1 register to determine if link is up and the current speed for | ||
425 | * the PHY. | ||
426 | **/ | ||
427 | s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, | ||
428 | bool *link_up) | ||
429 | { | ||
430 | s32 status = 0; | ||
431 | u32 time_out; | ||
432 | u32 max_time_out = 10; | ||
433 | u16 phy_link = 0; | ||
434 | u16 phy_speed = 0; | ||
435 | u16 phy_data = 0; | ||
436 | |||
437 | /* Initialize speed and link to default case */ | ||
438 | *link_up = false; | ||
439 | *speed = IXGBE_LINK_SPEED_10GB_FULL; | ||
440 | |||
441 | /* | ||
442 | * Check current speed and link status of the PHY register. | ||
443 | * This is a vendor specific register and may have to | ||
444 | * be changed for other copper PHYs. | ||
445 | */ | ||
446 | for (time_out = 0; time_out < max_time_out; time_out++) { | ||
447 | udelay(10); | ||
448 | if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { | ||
449 | *link_up = true; | ||
450 | if (phy_speed == | ||
451 | IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) | ||
452 | *speed = IXGBE_LINK_SPEED_1GB_FULL; | ||
453 | break; | ||
454 | } else { | ||
455 | status = ixgbe_read_phy_reg(hw, | ||
456 | IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, | ||
457 | IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, | ||
458 | &phy_data); | ||
459 | phy_link = phy_data & | ||
460 | IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; | ||
461 | phy_speed = phy_data & | ||
462 | IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; | ||
463 | } | ||
464 | } | ||
465 | |||
466 | return status; | ||
467 | } | ||
468 | |||
469 | /** | ||
470 | * ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities | ||
471 | * @hw: pointer to hardware structure | 402 | * @hw: pointer to hardware structure |
472 | * @speed: new link speed | 403 | * @speed: new link speed |
473 | * @autoneg: true if autonegotiation enabled | 404 | * @autoneg: true if autonegotiation enabled |
474 | **/ | 405 | **/ |
475 | s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, | 406 | s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, |
476 | bool autoneg, | 407 | ixgbe_link_speed speed, |
477 | bool autoneg_wait_to_complete) | 408 | bool autoneg, |
409 | bool autoneg_wait_to_complete) | ||
478 | { | 410 | { |
411 | |||
479 | /* | 412 | /* |
480 | * Clear autoneg_advertised and set new values based on input link | 413 | * Clear autoneg_advertised and set new values based on input link |
481 | * speed. | 414 | * speed. |
@@ -484,11 +417,13 @@ s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, | |||
484 | 417 | ||
485 | if (speed & IXGBE_LINK_SPEED_10GB_FULL) | 418 | if (speed & IXGBE_LINK_SPEED_10GB_FULL) |
486 | hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; | 419 | hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; |
420 | |||
487 | if (speed & IXGBE_LINK_SPEED_1GB_FULL) | 421 | if (speed & IXGBE_LINK_SPEED_1GB_FULL) |
488 | hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; | 422 | hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; |
489 | 423 | ||
490 | /* Setup link based on the new speed settings */ | 424 | /* Setup link based on the new speed settings */ |
491 | ixgbe_setup_tnx_phy_link(hw); | 425 | hw->phy.ops.setup_link(hw); |
492 | 426 | ||
493 | return 0; | 427 | return 0; |
494 | } | 428 | } |
429 | |||
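With the tnx-specific routine gone, a caller that wants to advertise both supported speeds goes through the generic helper instead; a minimal sketch, assuming the same semantics as the ixgbe_link_config() call added in ixgbe_main.c:

	ixgbe_link_speed speed = IXGBE_LINK_SPEED_10GB_FULL |
	                         IXGBE_LINK_SPEED_1GB_FULL;

	/* autoneg on, wait for completion; the helper rebuilds
	 * hw->phy.autoneg_advertised from the mask and then calls
	 * hw->phy.ops.setup_link() */
	ixgbe_setup_phy_link_speed_generic(hw, speed, true, true);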
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index aa3ea72e678e..f88c9131a01c 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h | |||
@@ -30,20 +30,52 @@ | |||
30 | #define _IXGBE_PHY_H_ | 30 | #define _IXGBE_PHY_H_ |
31 | 31 | ||
32 | #include "ixgbe_type.h" | 32 | #include "ixgbe_type.h" |
33 | #define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 | ||
33 | 34 | ||
34 | s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); | 35 | /* EEPROM byte offsets */ |
35 | s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); | 36 | #define IXGBE_SFF_IDENTIFIER 0x0 |
36 | s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, | 37 | #define IXGBE_SFF_IDENTIFIER_SFP 0x3 |
37 | bool autoneg_wait_to_complete); | 38 | #define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 |
38 | s32 ixgbe_identify_phy(struct ixgbe_hw *hw); | 39 | #define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 |
39 | s32 ixgbe_reset_phy(struct ixgbe_hw *hw); | 40 | #define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 |
40 | s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, | 41 | #define IXGBE_SFF_1GBE_COMP_CODES 0x6 |
41 | u32 device_type, u16 *phy_data); | 42 | #define IXGBE_SFF_10GBE_COMP_CODES 0x3 |
42 | 43 | #define IXGBE_SFF_TRANSMISSION_MEDIA 0x9 | |
43 | /* PHY specific */ | 44 | |
44 | s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw); | 45 | /* Bitmasks */ |
45 | s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); | 46 | #define IXGBE_SFF_TWIN_AX_CAPABLE 0x80 |
46 | s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, | 47 | #define IXGBE_SFF_1GBASESX_CAPABLE 0x1 |
47 | bool autoneg_wait_to_complete); | 48 | #define IXGBE_SFF_10GBASESR_CAPABLE 0x10 |
49 | #define IXGBE_SFF_10GBASELR_CAPABLE 0x20 | ||
50 | #define IXGBE_I2C_EEPROM_READ_MASK 0x100 | ||
51 | #define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 | ||
52 | #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 | ||
53 | #define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 | ||
54 | #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 | ||
55 | #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 | ||
56 | |||
57 | /* Bit-shift macros */ | ||
58 | #define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 12 | ||
59 | #define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 8 | ||
60 | #define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 4 | ||
61 | |||
62 | /* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ | ||
63 | #define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 | ||
64 | #define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 | ||
65 | #define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 | ||
66 | |||
67 | |||
68 | s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); | ||
69 | s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); | ||
70 | s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); | ||
71 | s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, | ||
72 | u32 device_type, u16 *phy_data); | ||
73 | s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, | ||
74 | u32 device_type, u16 phy_data); | ||
75 | s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); | ||
76 | s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, | ||
77 | ixgbe_link_speed speed, | ||
78 | bool autoneg, | ||
79 | bool autoneg_wait_to_complete); | ||
48 | 80 | ||
49 | #endif /* _IXGBE_PHY_H_ */ | 81 | #endif /* _IXGBE_PHY_H_ */ |
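The header now only exports *_generic entry points; each MAC family is expected to wire them into its PHY ops table, which ixgbe_probe() then copies into hw->phy.ops. A hypothetical sketch of such a table (the struct name and the identify/reset/setup_link_speed field names are assumed; read_reg, write_reg and setup_link are the fields used elsewhere in this patch):

	static struct ixgbe_phy_operations phy_ops_82598 = {
		.identify         = &ixgbe_identify_phy_generic,
		.reset            = &ixgbe_reset_phy_generic,
		.read_reg         = &ixgbe_read_phy_reg_generic,
		.write_reg        = &ixgbe_write_phy_reg_generic,
		.setup_link       = &ixgbe_setup_phy_link_generic,
		.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
	};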
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 7057aa3f3938..c76e30b94d89 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -37,7 +37,6 @@ | |||
37 | /* Device IDs */ | 37 | /* Device IDs */ |
38 | #define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 | 38 | #define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 |
39 | #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 | 39 | #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 |
40 | #define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8 | ||
41 | #define IXGBE_DEV_ID_82598EB_CX4 0x10DD | 40 | #define IXGBE_DEV_ID_82598EB_CX4 0x10DD |
42 | #define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC | 41 | #define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC |
43 | #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 | 42 | #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 |
@@ -71,11 +70,11 @@ | |||
71 | #define IXGBE_EIMC 0x00888 | 70 | #define IXGBE_EIMC 0x00888 |
72 | #define IXGBE_EIAC 0x00810 | 71 | #define IXGBE_EIAC 0x00810 |
73 | #define IXGBE_EIAM 0x00890 | 72 | #define IXGBE_EIAM 0x00890 |
74 | #define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */ | 73 | #define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : (0x012300 + ((_i) * 4))) |
75 | #define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ | 74 | #define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ |
76 | #define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ | 75 | #define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ |
77 | #define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ | 76 | #define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ |
78 | #define IXGBE_PBACL 0x11068 | 77 | #define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) |
79 | #define IXGBE_GPIE 0x00898 | 78 | #define IXGBE_GPIE 0x00898 |
80 | 79 | ||
81 | /* Flow Control Registers */ | 80 | /* Flow Control Registers */ |
@@ -87,20 +86,33 @@ | |||
87 | #define IXGBE_TFCS 0x0CE00 | 86 | #define IXGBE_TFCS 0x0CE00 |
88 | 87 | ||
89 | /* Receive DMA Registers */ | 88 | /* Receive DMA Registers */ |
90 | #define IXGBE_RDBAL(_i) (0x01000 + ((_i) * 0x40)) /* 64 of each (0-63)*/ | 89 | #define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : (0x0D000 + ((_i - 64) * 0x40))) |
91 | #define IXGBE_RDBAH(_i) (0x01004 + ((_i) * 0x40)) | 90 | #define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : (0x0D004 + ((_i - 64) * 0x40))) |
92 | #define IXGBE_RDLEN(_i) (0x01008 + ((_i) * 0x40)) | 91 | #define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : (0x0D008 + ((_i - 64) * 0x40))) |
93 | #define IXGBE_RDH(_i) (0x01010 + ((_i) * 0x40)) | 92 | #define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : (0x0D010 + ((_i - 64) * 0x40))) |
94 | #define IXGBE_RDT(_i) (0x01018 + ((_i) * 0x40)) | 93 | #define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : (0x0D018 + ((_i - 64) * 0x40))) |
95 | #define IXGBE_RXDCTL(_i) (0x01028 + ((_i) * 0x40)) | 94 | #define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : (0x0D028 + ((_i - 64) * 0x40))) |
96 | #define IXGBE_RSCCTL(_i) (0x0102C + ((_i) * 0x40)) | 95 | /* |
97 | #define IXGBE_SRRCTL(_i) (0x02100 + ((_i) * 4)) | 96 | * Split and Replication Receive Control Registers |
98 | /* array of 16 (0x02100-0x0213C) */ | 97 | * 00-15 : 0x02100 + n*4 |
99 | #define IXGBE_DCA_RXCTRL(_i) (0x02200 + ((_i) * 4)) | 98 | * 16-64 : 0x01014 + n*0x40 |
100 | /* array of 16 (0x02200-0x0223C) | 99 | * 16-63 : 0x01014 + n*0x40 |
101 | #define IXGBE_RDRXCTL 0x02F00 | 100 | */ |
101 | #define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ | ||
102 | (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ | ||
103 | (0x0D014 + ((_i - 64) * 0x40)))) | ||
104 | /* | ||
105 | * Rx DCA Control Register: | ||
106 | * 00-15 : 0x02200 + n*4 | ||
107 | * 16-63 : 0x0100C + n*0x40 | ||
108 | * 64-127: 0x0D00C + (n-64)*0x40 | ||
109 | */ | ||
110 | #define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ | ||
111 | (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ | ||
112 | (0x0D00C + ((_i - 64) * 0x40)))) | ||
113 | #define IXGBE_RDRXCTL 0x02F00 | ||
102 | #define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) | 114 | #define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) |
103 | /* 8 of these 0x03C00 - 0x03C1C */ | 115 | /* 8 of these 0x03C00 - 0x03C1C */ |
104 | #define IXGBE_RXCTRL 0x03000 | 116 | #define IXGBE_RXCTRL 0x03000 |
105 | #define IXGBE_DROPEN 0x03D04 | 117 | #define IXGBE_DROPEN 0x03D04 |
106 | #define IXGBE_RXPBSIZE_SHIFT 10 | 118 | #define IXGBE_RXPBSIZE_SHIFT 10 |
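The new conditional register macros encode the three address ranges spelled out in the comments above. A small standalone sketch (not driver code) that evaluates the SRRCTL formula for one index from each range:

	#include <stdio.h>

	#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
	                          (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
	                          (0x0D014 + (((_i) - 64) * 0x40))))

	int main(void)
	{
		/* expected: 0x02100, 0x01514, 0x0D194 */
		printf("SRRCTL(0)  = 0x%05X\n", IXGBE_SRRCTL(0));
		printf("SRRCTL(20) = 0x%05X\n", IXGBE_SRRCTL(20));
		printf("SRRCTL(70) = 0x%05X\n", IXGBE_SRRCTL(70));
		return 0;
	}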
@@ -108,29 +120,32 @@ | |||
108 | /* Receive Registers */ | 120 | /* Receive Registers */ |
109 | #define IXGBE_RXCSUM 0x05000 | 121 | #define IXGBE_RXCSUM 0x05000 |
110 | #define IXGBE_RFCTL 0x05008 | 122 | #define IXGBE_RFCTL 0x05008 |
123 | #define IXGBE_DRECCCTL 0x02F08 | ||
124 | #define IXGBE_DRECCCTL_DISABLE 0 | ||
125 | /* Multicast Table Array - 128 entries */ | ||
111 | #define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) | 126 | #define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) |
112 | /* Multicast Table Array - 128 entries */ | 127 | #define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : (0x0A200 + ((_i) * 8))) |
113 | #define IXGBE_RAL(_i) (0x05400 + ((_i) * 8)) /* 16 of these (0-15) */ | 128 | #define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : (0x0A204 + ((_i) * 8))) |
114 | #define IXGBE_RAH(_i) (0x05404 + ((_i) * 8)) /* 16 of these (0-15) */ | 129 | /* Packet split receive type */ |
115 | #define IXGBE_PSRTYPE 0x05480 | 130 | #define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : (0x0EA00 + ((_i) * 4))) |
116 | /* 0x5480-0x54BC Packet split receive type */ | 131 | /* array of 4096 1-bit vlan filters */ |
117 | #define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) | 132 | #define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) |
118 | /* array of 4096 1-bit vlan filters */ | 133 | /*array of 4096 4-bit vlan vmdq indices */ |
119 | #define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) | 134 | #define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) |
120 | /*array of 4096 4-bit vlan vmdq indicies */ | ||
121 | #define IXGBE_FCTRL 0x05080 | 135 | #define IXGBE_FCTRL 0x05080 |
122 | #define IXGBE_VLNCTRL 0x05088 | 136 | #define IXGBE_VLNCTRL 0x05088 |
123 | #define IXGBE_MCSTCTRL 0x05090 | 137 | #define IXGBE_MCSTCTRL 0x05090 |
124 | #define IXGBE_MRQC 0x05818 | 138 | #define IXGBE_MRQC 0x05818 |
125 | #define IXGBE_VMD_CTL 0x0581C | ||
126 | #define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ | 139 | #define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ |
127 | #define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ | 140 | #define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ |
128 | #define IXGBE_IMIRVP 0x05AC0 | 141 | #define IXGBE_IMIRVP 0x05AC0 |
142 | #define IXGBE_VMD_CTL 0x0581C | ||
129 | #define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ | 143 | #define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ |
130 | #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ | 144 | #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ |
131 | 145 | ||
146 | |||
132 | /* Transmit DMA registers */ | 147 | /* Transmit DMA registers */ |
133 | #define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40))/* 32 of these (0-31)*/ | 148 | #define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ |
134 | #define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) | 149 | #define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) |
135 | #define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) | 150 | #define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) |
136 | #define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) | 151 | #define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) |
@@ -139,11 +154,10 @@ | |||
139 | #define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) | 154 | #define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) |
140 | #define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) | 155 | #define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) |
141 | #define IXGBE_DTXCTL 0x07E00 | 156 | #define IXGBE_DTXCTL 0x07E00 |
142 | #define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) | 157 | |
143 | /* there are 16 of these (0-15) */ | 158 | #define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ |
144 | #define IXGBE_TIPG 0x0CB00 | 159 | #define IXGBE_TIPG 0x0CB00 |
145 | #define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04)) | 160 | #define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ |
146 | /* there are 8 of these */ | ||
147 | #define IXGBE_MNGTXMAP 0x0CD10 | 161 | #define IXGBE_MNGTXMAP 0x0CD10 |
148 | #define IXGBE_TIPG_FIBER_DEFAULT 3 | 162 | #define IXGBE_TIPG_FIBER_DEFAULT 3 |
149 | #define IXGBE_TXPBSIZE_SHIFT 10 | 163 | #define IXGBE_TXPBSIZE_SHIFT 10 |
@@ -155,6 +169,7 @@ | |||
155 | #define IXGBE_IPAV 0x05838 | 169 | #define IXGBE_IPAV 0x05838 |
156 | #define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ | 170 | #define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ |
157 | #define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ | 171 | #define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ |
172 | |||
158 | #define IXGBE_WUPL 0x05900 | 173 | #define IXGBE_WUPL 0x05900 |
159 | #define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ | 174 | #define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ |
160 | #define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */ | 175 | #define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */ |
@@ -171,6 +186,8 @@ | |||
171 | #define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ | 186 | #define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ |
172 | #define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ | 187 | #define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ |
173 | 188 | ||
189 | |||
190 | |||
174 | /* Stats registers */ | 191 | /* Stats registers */ |
175 | #define IXGBE_CRCERRS 0x04000 | 192 | #define IXGBE_CRCERRS 0x04000 |
176 | #define IXGBE_ILLERRC 0x04004 | 193 | #define IXGBE_ILLERRC 0x04004 |
@@ -225,7 +242,7 @@ | |||
225 | #define IXGBE_XEC 0x04120 | 242 | #define IXGBE_XEC 0x04120 |
226 | 243 | ||
227 | #define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */ | 244 | #define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */ |
228 | #define IXGBE_TQSMR(_i) (0x07300 + ((_i) * 4)) /* 8 of these */ | 245 | #define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : (0x08600 + ((_i) * 4))) |
229 | 246 | ||
230 | #define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ | 247 | #define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ |
231 | #define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ | 248 | #define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ |
@@ -276,17 +293,17 @@ | |||
276 | #define IXGBE_DCA_CTRL 0x11074 | 293 | #define IXGBE_DCA_CTRL 0x11074 |
277 | 294 | ||
278 | /* Diagnostic Registers */ | 295 | /* Diagnostic Registers */ |
279 | #define IXGBE_RDSTATCTL 0x02C20 | 296 | #define IXGBE_RDSTATCTL 0x02C20 |
280 | #define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ | 297 | #define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ |
281 | #define IXGBE_RDHMPN 0x02F08 | 298 | #define IXGBE_RDHMPN 0x02F08 |
282 | #define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) | 299 | #define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) |
283 | #define IXGBE_RDPROBE 0x02F20 | 300 | #define IXGBE_RDPROBE 0x02F20 |
284 | #define IXGBE_TDSTATCTL 0x07C20 | 301 | #define IXGBE_TDSTATCTL 0x07C20 |
285 | #define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ | 302 | #define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ |
286 | #define IXGBE_TDHMPN 0x07F08 | 303 | #define IXGBE_TDHMPN 0x07F08 |
287 | #define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) | 304 | #define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) |
288 | #define IXGBE_TDPROBE 0x07F20 | 305 | #define IXGBE_TDPROBE 0x07F20 |
289 | #define IXGBE_TXBUFCTRL 0x0C600 | 306 | #define IXGBE_TXBUFCTRL 0x0C600 |
290 | #define IXGBE_TXBUFDATA0 0x0C610 | 307 | #define IXGBE_TXBUFDATA0 0x0C610 |
291 | #define IXGBE_TXBUFDATA1 0x0C614 | 308 | #define IXGBE_TXBUFDATA1 0x0C614 |
292 | #define IXGBE_TXBUFDATA2 0x0C618 | 309 | #define IXGBE_TXBUFDATA2 0x0C618 |
@@ -387,7 +404,7 @@ | |||
387 | 404 | ||
388 | #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ | 405 | #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ |
389 | #define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ | 406 | #define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ |
390 | #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */ | 407 | #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ |
391 | #define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ | 408 | #define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ |
392 | 409 | ||
393 | /* MSCA Bit Masks */ | 410 | /* MSCA Bit Masks */ |
@@ -411,10 +428,10 @@ | |||
411 | #define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ | 428 | #define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ |
412 | 429 | ||
413 | /* MSRWD bit masks */ | 430 | /* MSRWD bit masks */ |
414 | #define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF | 431 | #define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF |
415 | #define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 | 432 | #define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 |
416 | #define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 | 433 | #define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 |
417 | #define IXGBE_MSRWD_READ_DATA_SHIFT 16 | 434 | #define IXGBE_MSRWD_READ_DATA_SHIFT 16 |
418 | 435 | ||
419 | /* Atlas registers */ | 436 | /* Atlas registers */ |
420 | #define IXGBE_ATLAS_PDN_LPBK 0x24 | 437 | #define IXGBE_ATLAS_PDN_LPBK 0x24 |
@@ -429,6 +446,7 @@ | |||
429 | #define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 | 446 | #define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 |
430 | #define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 | 447 | #define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 |
431 | 448 | ||
449 | |||
432 | /* Device Type definitions for new protocol MDIO commands */ | 450 | /* Device Type definitions for new protocol MDIO commands */ |
433 | #define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 | 451 | #define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 |
434 | #define IXGBE_MDIO_PCS_DEV_TYPE 0x3 | 452 | #define IXGBE_MDIO_PCS_DEV_TYPE 0x3 |
@@ -436,6 +454,8 @@ | |||
436 | #define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 | 454 | #define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 |
437 | #define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ | 455 | #define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ |
438 | 456 | ||
457 | #define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ | ||
458 | |||
439 | #define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ | 459 | #define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ |
440 | #define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ | 460 | #define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ |
441 | #define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ | 461 | #define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ |
@@ -449,23 +469,39 @@ | |||
449 | #define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ | 469 | #define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ |
450 | #define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ | 470 | #define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ |
451 | #define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ | 471 | #define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ |
452 | #define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Abilty Reg */ | 472 | #define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ |
453 | #define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ | 473 | #define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ |
454 | #define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ | 474 | #define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ |
455 | 475 | ||
476 | #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Address Reg */ | ||
477 | #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ | ||
478 | #define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ | ||
479 | |||
480 | /* MII clause 22/28 definitions */ | ||
481 | #define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 | ||
482 | |||
483 | #define IXGBE_MII_SPEED_SELECTION_REG 0x10 | ||
484 | #define IXGBE_MII_RESTART 0x200 | ||
485 | #define IXGBE_MII_AUTONEG_COMPLETE 0x20 | ||
486 | #define IXGBE_MII_AUTONEG_REG 0x0 | ||
487 | |||
456 | #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 | 488 | #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 |
457 | #define IXGBE_MAX_PHY_ADDR 32 | 489 | #define IXGBE_MAX_PHY_ADDR 32 |
458 | 490 | ||
459 | /* PHY IDs*/ | 491 | /* PHY IDs*/ |
460 | #define TN1010_PHY_ID 0x00A19410 | ||
461 | #define QT2022_PHY_ID 0x0043A400 | 492 | #define QT2022_PHY_ID 0x0043A400 |
462 | 493 | ||
494 | /* PHY Types */ | ||
495 | #define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 | ||
496 | |||
463 | /* General purpose Interrupt Enable */ | 497 | /* General purpose Interrupt Enable */ |
464 | #define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ | 498 | #define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ |
465 | #define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ | 499 | #define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ |
466 | #define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ | 500 | #define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ |
467 | #define IXGBE_GPIE_EIAME 0x40000000 | 501 | #define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ |
468 | #define IXGBE_GPIE_PBA_SUPPORT 0x80000000 | 502 | #define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ |
503 | #define IXGBE_GPIE_EIAME 0x40000000 | ||
504 | #define IXGBE_GPIE_PBA_SUPPORT 0x80000000 | ||
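The SDP0/SDP1 enable bits introduced above are meant to be OR'ed into GPIE so the software-definable pins raise general-purpose interrupts; a minimal sketch (the IXGBE_GPIE register offset is defined elsewhere in this header):

    u32 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

    gpie |= IXGBE_SDP0_GPIEN | IXGBE_SDP1_GPIEN;
    IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);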
469 | 505 | ||
470 | /* Transmit Flow Control status */ | 506 | /* Transmit Flow Control status */ |
471 | #define IXGBE_TFCS_TXOFF 0x00000001 | 507 | #define IXGBE_TFCS_TXOFF 0x00000001 |
@@ -526,7 +562,7 @@ | |||
526 | #define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ | 562 | #define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ |
527 | 563 | ||
528 | /* RMCS Bit Masks */ | 564 | /* RMCS Bit Masks */ |
529 | #define IXGBE_RMCS_RRM 0x00000002 /* Receive Recylce Mode enable */ | 565 | #define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */ |
530 | /* Receive Arbitration Control: 0 Round Robin, 1 DFP */ | 566 | /* Receive Arbitration Control: 0 Round Robin, 1 DFP */ |
531 | #define IXGBE_RMCS_RAC 0x00000004 | 567 | #define IXGBE_RMCS_RAC 0x00000004 |
532 | #define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ | 568 | #define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ |
@@ -534,12 +570,15 @@ | |||
534 | #define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */ | 570 | #define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */ |
535 | #define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ | 571 | #define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ |
536 | 572 | ||
573 | |||
537 | /* Interrupt register bitmasks */ | 574 | /* Interrupt register bitmasks */ |
538 | 575 | ||
539 | /* Extended Interrupt Cause Read */ | 576 | /* Extended Interrupt Cause Read */ |
540 | #define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ | 577 | #define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ |
541 | #define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ | 578 | #define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ |
542 | #define IXGBE_EICR_MNG 0x00400000 /* Managability Event Interrupt */ | 579 | #define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ |
580 | #define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ | ||
581 | #define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ | ||
543 | #define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ | 582 | #define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ |
544 | #define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ | 583 | #define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ |
545 | #define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ | 584 | #define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ |
@@ -547,11 +586,12 @@ | |||
547 | 586 | ||
548 | /* Extended Interrupt Cause Set */ | 587 | /* Extended Interrupt Cause Set */ |
549 | #define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ | 588 | #define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ |
550 | #define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ | 589 | #define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ |
551 | #define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ | 590 | #define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ |
552 | #define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ | 591 | #define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ |
553 | #define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ | 592 | #define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ |
554 | #define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ | 593 | #define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ |
594 | #define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ | ||
555 | #define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ | 595 | #define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ |
556 | #define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ | 596 | #define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ |
557 | 597 | ||
@@ -559,7 +599,9 @@ | |||
559 | #define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ | 599 | #define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ |
560 | #define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ | 600 | #define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ |
561 | #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ | 601 | #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ |
562 | #define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ | 602 | #define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ |
603 | #define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ | ||
604 | #define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ | ||
563 | #define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ | 605 | #define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ |
564 | #define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ | 606 | #define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ |
565 | #define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ | 607 | #define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ |
@@ -568,18 +610,20 @@ | |||
568 | #define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ | 610 | #define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ |
569 | #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ | 611 | #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ |
570 | #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ | 612 | #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ |
571 | #define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ | 613 | #define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ |
572 | #define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Error */ | 614 | #define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ |
615 | #define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ | ||
616 | #define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ | ||
573 | #define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ | 617 | #define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ |
574 | #define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ | 618 | #define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ |
575 | 619 | ||
576 | #define IXGBE_EIMS_ENABLE_MASK (\ | 620 | #define IXGBE_EIMS_ENABLE_MASK ( \ |
577 | IXGBE_EIMS_RTX_QUEUE | \ | 621 | IXGBE_EIMS_RTX_QUEUE | \ |
578 | IXGBE_EIMS_LSC | \ | 622 | IXGBE_EIMS_LSC | \ |
579 | IXGBE_EIMS_TCP_TIMER | \ | 623 | IXGBE_EIMS_TCP_TIMER | \ |
580 | IXGBE_EIMS_OTHER) | 624 | IXGBE_EIMS_OTHER) |
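IXGBE_EIMS_ENABLE_MASK collects the causes unmasked by default, so enabling them is a single register write, for example:

    IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_ENABLE_MASK);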
581 | 625 | ||
582 | /* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */ | 626 | /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ |
583 | #define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ | 627 | #define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ |
584 | #define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ | 628 | #define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ |
585 | #define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ | 629 | #define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ |
@@ -616,6 +660,7 @@ | |||
616 | #define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ | 660 | #define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ |
617 | #define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ | 661 | #define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ |
618 | 662 | ||
663 | |||
619 | #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ | 664 | #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ |
620 | 665 | ||
621 | /* STATUS Bit Masks */ | 666 | /* STATUS Bit Masks */ |
@@ -663,16 +708,16 @@ | |||
663 | #define IXGBE_AUTOC_AN_RESTART 0x00001000 | 708 | #define IXGBE_AUTOC_AN_RESTART 0x00001000 |
664 | #define IXGBE_AUTOC_FLU 0x00000001 | 709 | #define IXGBE_AUTOC_FLU 0x00000001 |
665 | #define IXGBE_AUTOC_LMS_SHIFT 13 | 710 | #define IXGBE_AUTOC_LMS_SHIFT 13 |
666 | #define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) | 711 | #define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) |
667 | #define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) | 712 | #define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) |
668 | #define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) | 713 | #define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) |
669 | #define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) | 714 | #define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) |
670 | #define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) | 715 | #define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) |
671 | #define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) | 716 | #define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) |
672 | #define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) | 717 | #define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) |
673 | 718 | ||
674 | #define IXGBE_AUTOC_1G_PMA_PMD 0x00000200 | 719 | #define IXGBE_AUTOC_1G_PMA_PMD 0x00000200 |
675 | #define IXGBE_AUTOC_10G_PMA_PMD 0x00000180 | 720 | #define IXGBE_AUTOC_10G_PMA_PMD 0x00000180 |
676 | #define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 | 721 | #define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 |
677 | #define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 | 722 | #define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 |
678 | #define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) | 723 | #define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) |
@@ -753,6 +798,11 @@ | |||
753 | #define IXGBE_PBANUM0_PTR 0x15 | 798 | #define IXGBE_PBANUM0_PTR 0x15 |
754 | #define IXGBE_PBANUM1_PTR 0x16 | 799 | #define IXGBE_PBANUM1_PTR 0x16 |
755 | 800 | ||
801 | /* Legacy EEPROM word offsets */ | ||
802 | #define IXGBE_ISCSI_BOOT_CAPS 0x0033 | ||
803 | #define IXGBE_ISCSI_SETUP_PORT_0 0x0030 | ||
804 | #define IXGBE_ISCSI_SETUP_PORT_1 0x0034 | ||
805 | |||
756 | /* EEPROM Commands - SPI */ | 806 | /* EEPROM Commands - SPI */ |
757 | #define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ | 807 | #define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ |
758 | #define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 | 808 | #define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 |
@@ -760,7 +810,7 @@ | |||
760 | #define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ | 810 | #define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ |
761 | #define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ | 811 | #define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ |
762 | #define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ | 812 | #define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ |
763 | /* EEPROM reset Write Enbale latch */ | 813 | /* EEPROM reset Write Enable latch */ |
764 | #define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 | 814 | #define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 |
765 | #define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ | 815 | #define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ |
766 | #define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ | 816 | #define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ |
@@ -799,22 +849,20 @@ | |||
799 | /* Number of 100 microseconds we wait for PCI Express master disable */ | 849 | /* Number of 100 microseconds we wait for PCI Express master disable */ |
800 | #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 | 850 | #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 |
801 | 851 | ||
802 | /* PHY Types */ | ||
803 | #define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 | ||
804 | |||
805 | /* Check whether address is multicast. This is little-endian specific check.*/ | 852 | /* Check whether address is multicast. This is little-endian specific check.*/ |
806 | #define IXGBE_IS_MULTICAST(Address) \ | 853 | #define IXGBE_IS_MULTICAST(Address) \ |
807 | (bool)(((u8 *)(Address))[0] & ((u8)0x01)) | 854 | (bool)(((u8 *)(Address))[0] & ((u8)0x01)) |
808 | 855 | ||
809 | /* Check whether an address is broadcast. */ | 856 | /* Check whether an address is broadcast. */ |
810 | #define IXGBE_IS_BROADCAST(Address) \ | 857 | #define IXGBE_IS_BROADCAST(Address) \ |
811 | ((((u8 *)(Address))[0] == ((u8)0xff)) && \ | 858 | ((((u8 *)(Address))[0] == ((u8)0xff)) && \ |
812 | (((u8 *)(Address))[1] == ((u8)0xff))) | 859 | (((u8 *)(Address))[1] == ((u8)0xff))) |
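The address-test macros above only inspect the leading byte(s) of a candidate address; a small usage sketch (both helpers are hypothetical, shown only for shape):

    u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];

    if (IXGBE_IS_BROADCAST(addr))
            count_broadcast(adapter);       /* hypothetical helper */
    else if (IXGBE_IS_MULTICAST(addr))
            add_mc_filter(hw, addr);        /* hypothetical helper */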
813 | 860 | ||
814 | /* RAH */ | 861 | /* RAH */ |
815 | #define IXGBE_RAH_VIND_MASK 0x003C0000 | 862 | #define IXGBE_RAH_VIND_MASK 0x003C0000 |
816 | #define IXGBE_RAH_VIND_SHIFT 18 | 863 | #define IXGBE_RAH_VIND_SHIFT 18 |
817 | #define IXGBE_RAH_AV 0x80000000 | 864 | #define IXGBE_RAH_AV 0x80000000 |
865 | #define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF | ||
818 | 866 | ||
819 | /* Header split receive */ | 867 | /* Header split receive */ |
820 | #define IXGBE_RFCTL_ISCSI_DIS 0x00000001 | 868 | #define IXGBE_RFCTL_ISCSI_DIS 0x00000001 |
@@ -843,7 +891,7 @@ | |||
843 | #define IXGBE_MAX_FRAME_SZ 0x40040000 | 891 | #define IXGBE_MAX_FRAME_SZ 0x40040000 |
844 | 892 | ||
845 | #define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ | 893 | #define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ |
846 | #define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq. # write-back enable */ | 894 | #define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ |
847 | 895 | ||
848 | /* Receive Config masks */ | 896 | /* Receive Config masks */ |
849 | #define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ | 897 | #define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ |
@@ -856,7 +904,7 @@ | |||
856 | #define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ | 904 | #define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ |
857 | #define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ | 905 | #define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ |
858 | #define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ | 906 | #define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ |
859 | /* Receive Priority Flow Control Enbale */ | 907 | /* Receive Priority Flow Control Enable */ |
860 | #define IXGBE_FCTRL_RPFCE 0x00004000 | 908 | #define IXGBE_FCTRL_RPFCE 0x00004000 |
861 | #define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ | 909 | #define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ |
862 | 910 | ||
@@ -886,9 +934,8 @@ | |||
886 | /* Receive Descriptor bit definitions */ | 934 | /* Receive Descriptor bit definitions */ |
887 | #define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ | 935 | #define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ |
888 | #define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ | 936 | #define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ |
889 | #define IXGBE_RXD_STAT_IXSM 0x04 /* Ignore checksum */ | ||
890 | #define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ | 937 | #define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ |
891 | #define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ | 938 | #define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ |
892 | #define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ | 939 | #define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ |
893 | #define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ | 940 | #define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ |
894 | #define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ | 941 | #define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ |
@@ -904,7 +951,7 @@ | |||
904 | #define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ | 951 | #define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ |
905 | #define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ | 952 | #define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ |
906 | #define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ | 953 | #define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ |
907 | #define IXGBE_RXDADV_HBO 0x00800000 | 954 | #define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ |
908 | #define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ | 955 | #define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ |
909 | #define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ | 956 | #define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ |
910 | #define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ | 957 | #define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ |
@@ -918,15 +965,17 @@ | |||
918 | #define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ | 965 | #define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ |
919 | #define IXGBE_RXD_CFI_SHIFT 12 | 966 | #define IXGBE_RXD_CFI_SHIFT 12 |
920 | 967 | ||
968 | |||
921 | /* SRRCTL bit definitions */ | 969 | /* SRRCTL bit definitions */ |
922 | #define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ | 970 | #define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ |
923 | #define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F | 971 | #define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F |
924 | #define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 | 972 | #define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 |
925 | #define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 | 973 | #define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 |
926 | #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 | 974 | #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 |
927 | #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 | 975 | #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 |
928 | #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 | 976 | #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 |
929 | #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 | 977 | #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 |
978 | #define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 | ||
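The new IXGBE_SRRCTL_DESCTYPE_MASK lets the descriptor-type field be cleared before a new type is programmed; a sketch of setting a ring to one-buffer advanced descriptors with 2 KB packet buffers (ring index and buffer size illustrative):

    u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(index));

    srrctl &= ~IXGBE_SRRCTL_DESCTYPE_MASK;
    srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
    srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
    srrctl |= 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(index), srrctl);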
930 | 979 | ||
931 | #define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 | 980 | #define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 |
932 | #define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF | 981 | #define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF |
@@ -960,21 +1009,20 @@ | |||
960 | #define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ | 1009 | #define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ |
961 | #define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ | 1010 | #define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ |
962 | #define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ | 1011 | #define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ |
963 | |||
964 | /* Masks to determine if packets should be dropped due to frame errors */ | 1012 | /* Masks to determine if packets should be dropped due to frame errors */ |
965 | #define IXGBE_RXD_ERR_FRAME_ERR_MASK (\ | 1013 | #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ |
966 | IXGBE_RXD_ERR_CE | \ | 1014 | IXGBE_RXD_ERR_CE | \ |
967 | IXGBE_RXD_ERR_LE | \ | 1015 | IXGBE_RXD_ERR_LE | \ |
968 | IXGBE_RXD_ERR_PE | \ | 1016 | IXGBE_RXD_ERR_PE | \ |
969 | IXGBE_RXD_ERR_OSE | \ | 1017 | IXGBE_RXD_ERR_OSE | \ |
970 | IXGBE_RXD_ERR_USE) | 1018 | IXGBE_RXD_ERR_USE) |
971 | 1019 | ||
972 | #define IXGBE_RXDADV_ERR_FRAME_ERR_MASK (\ | 1020 | #define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ |
973 | IXGBE_RXDADV_ERR_CE | \ | 1021 | IXGBE_RXDADV_ERR_CE | \ |
974 | IXGBE_RXDADV_ERR_LE | \ | 1022 | IXGBE_RXDADV_ERR_LE | \ |
975 | IXGBE_RXDADV_ERR_PE | \ | 1023 | IXGBE_RXDADV_ERR_PE | \ |
976 | IXGBE_RXDADV_ERR_OSE | \ | 1024 | IXGBE_RXDADV_ERR_OSE | \ |
977 | IXGBE_RXDADV_ERR_USE) | 1025 | IXGBE_RXDADV_ERR_USE) |
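Both frame-error masks are tested against the write-back status/error word of a received descriptor; a sketch for the advanced format, assuming staterr already holds that dword in CPU order:

    if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
            /* CRC, length, oversize, undersize or packet error: drop it */
            dev_kfree_skb_irq(skb);
            return;
    }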
978 | 1026 | ||
979 | /* Multicast bit mask */ | 1027 | /* Multicast bit mask */ |
980 | #define IXGBE_MCSTCTRL_MFE 0x4 | 1028 | #define IXGBE_MCSTCTRL_MFE 0x4 |
@@ -990,6 +1038,7 @@ | |||
990 | #define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ | 1038 | #define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ |
991 | #define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT | 1039 | #define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT |
992 | 1040 | ||
1041 | |||
993 | /* Transmit Descriptor - Legacy */ | 1042 | /* Transmit Descriptor - Legacy */ |
994 | struct ixgbe_legacy_tx_desc { | 1043 | struct ixgbe_legacy_tx_desc { |
995 | u64 buffer_addr; /* Address of the descriptor's data buffer */ | 1044 | u64 buffer_addr; /* Address of the descriptor's data buffer */ |
@@ -1004,8 +1053,8 @@ struct ixgbe_legacy_tx_desc { | |||
1004 | union { | 1053 | union { |
1005 | __le32 data; | 1054 | __le32 data; |
1006 | struct { | 1055 | struct { |
1007 | u8 status; /* Descriptor status */ | 1056 | u8 status; /* Descriptor status */ |
1008 | u8 css; /* Checksum start */ | 1057 | u8 css; /* Checksum start */ |
1009 | __le16 vlan; | 1058 | __le16 vlan; |
1010 | } fields; | 1059 | } fields; |
1011 | } upper; | 1060 | } upper; |
@@ -1014,7 +1063,7 @@ struct ixgbe_legacy_tx_desc { | |||
1014 | /* Transmit Descriptor - Advanced */ | 1063 | /* Transmit Descriptor - Advanced */ |
1015 | union ixgbe_adv_tx_desc { | 1064 | union ixgbe_adv_tx_desc { |
1016 | struct { | 1065 | struct { |
1017 | __le64 buffer_addr; /* Address of descriptor's data buf */ | 1066 | __le64 buffer_addr; /* Address of descriptor's data buf */ |
1018 | __le32 cmd_type_len; | 1067 | __le32 cmd_type_len; |
1019 | __le32 olinfo_status; | 1068 | __le32 olinfo_status; |
1020 | } read; | 1069 | } read; |
@@ -1046,8 +1095,8 @@ union ixgbe_adv_rx_desc { | |||
1046 | union { | 1095 | union { |
1047 | __le32 data; | 1096 | __le32 data; |
1048 | struct { | 1097 | struct { |
1049 | __le16 pkt_info; /* RSS type, Packet type */ | 1098 | __le16 pkt_info; /* RSS, Pkt type */ |
1050 | __le16 hdr_info; /* Split Header, header len */ | 1099 | __le16 hdr_info; /* Splithdr, hdrlen */ |
1051 | } hs_rss; | 1100 | } hs_rss; |
1052 | } lo_dword; | 1101 | } lo_dword; |
1053 | union { | 1102 | union { |
@@ -1075,49 +1124,69 @@ struct ixgbe_adv_tx_context_desc { | |||
1075 | }; | 1124 | }; |
1076 | 1125 | ||
1077 | /* Adv Transmit Descriptor Config Masks */ | 1126 | /* Adv Transmit Descriptor Config Masks */ |
1078 | #define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buffer length(bytes) */ | 1127 | #define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ |
1079 | #define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ | 1128 | #define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ |
1080 | #define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ | 1129 | #define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ |
1081 | #define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ | 1130 | #define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ |
1082 | #define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ | 1131 | #define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ |
1083 | #define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ | 1132 | #define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ |
1084 | #define IXGBE_ADVTXD_DCMD_RDMA 0x04000000 /* RDMA */ | ||
1085 | #define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ | 1133 | #define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ |
1086 | #define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ | 1134 | #define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ |
1087 | #define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ | 1135 | #define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ |
1088 | #define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ | 1136 | #define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ |
1089 | #define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ | 1137 | #define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ |
1090 | #define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ | 1138 | #define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ |
1091 | #define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */ | 1139 | #define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ |
1092 | #define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ | 1140 | #define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ |
1093 | #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ | 1141 | #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ |
1142 | #define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ | ||
1094 | #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ | 1143 | #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ |
1095 | #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ | 1144 | #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ |
1096 | IXGBE_ADVTXD_POPTS_SHIFT) | 1145 | IXGBE_ADVTXD_POPTS_SHIFT) |
1097 | #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ | 1146 | #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ |
1098 | IXGBE_ADVTXD_POPTS_SHIFT) | 1147 | IXGBE_ADVTXD_POPTS_SHIFT) |
1099 | #define IXGBE_ADVTXD_POPTS_EOM 0x00000400 /* Enable L bit-RDMA DDP hdr */ | 1148 | #define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ |
1100 | #define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ | 1149 | #define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ |
1101 | #define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ | 1150 | #define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ |
1102 | #define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ | 1151 | #define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ |
1103 | #define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/ | 1152 | #define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ |
1104 | #define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ | 1153 | #define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ |
1105 | #define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ | 1154 | #define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ |
1106 | #define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ | 1155 | #define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ |
1107 | #define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ | 1156 | #define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ |
1108 | #define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ | 1157 | #define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ |
1109 | #define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ | 1158 | #define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ |
1110 | #define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ | 1159 | #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ |
1111 | #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ | 1160 | #define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ |
1112 | #define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */ | 1161 | #define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ |
1113 | #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ | 1162 | #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ |
1114 | #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ | 1163 | #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ |
1115 | 1164 | ||
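The ADVTXD masks and shifts compose the two command words of an advanced transmit descriptor; a condensed sketch for a plain single-buffer packet (descriptor pointer, DMA address and lengths illustrative):

    u32 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT |
                       IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_EOP |
                       buf_len;
    u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;

    tx_desc->read.buffer_addr = cpu_to_le64(dma);
    tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type_len);
    tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);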
1165 | /* Autonegotiation advertised speeds */ | ||
1166 | typedef u32 ixgbe_autoneg_advertised; | ||
1116 | /* Link speed */ | 1167 | /* Link speed */ |
1168 | typedef u32 ixgbe_link_speed; | ||
1117 | #define IXGBE_LINK_SPEED_UNKNOWN 0 | 1169 | #define IXGBE_LINK_SPEED_UNKNOWN 0 |
1118 | #define IXGBE_LINK_SPEED_100_FULL 0x0008 | 1170 | #define IXGBE_LINK_SPEED_100_FULL 0x0008 |
1119 | #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 | 1171 | #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 |
1120 | #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 | 1172 | #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 |
1173 | #define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ | ||
1174 | IXGBE_LINK_SPEED_10GB_FULL) | ||
1175 | |||
1176 | /* Physical layer type */ | ||
1177 | typedef u32 ixgbe_physical_layer; | ||
1178 | #define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 | ||
1179 | #define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 | ||
1180 | #define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 | ||
1181 | #define IXGBE_PHYSICAL_LAYER_100BASE_T 0x0004 | ||
1182 | #define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 | ||
1183 | #define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 | ||
1184 | #define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 | ||
1185 | #define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040 | ||
1186 | #define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080 | ||
1187 | #define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100 | ||
1188 | #define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 | ||
1189 | #define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 | ||
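Like the link speeds, ixgbe_physical_layer is a bitmask, so a MAC can report several layers at once; a small sketch of testing it through the get_supported_physical_layer hook declared further down in this header:

    u32 layer = hw->mac.ops.get_supported_physical_layer(hw);
    bool has_sr = (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) != 0;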
1121 | 1190 | ||
1122 | 1191 | ||
1123 | enum ixgbe_eeprom_type { | 1192 | enum ixgbe_eeprom_type { |
@@ -1134,16 +1203,38 @@ enum ixgbe_mac_type { | |||
1134 | 1203 | ||
1135 | enum ixgbe_phy_type { | 1204 | enum ixgbe_phy_type { |
1136 | ixgbe_phy_unknown = 0, | 1205 | ixgbe_phy_unknown = 0, |
1137 | ixgbe_phy_tn, | ||
1138 | ixgbe_phy_qt, | 1206 | ixgbe_phy_qt, |
1139 | ixgbe_phy_xaui | 1207 | ixgbe_phy_xaui, |
1208 | ixgbe_phy_tw_tyco, | ||
1209 | ixgbe_phy_tw_unknown, | ||
1210 | ixgbe_phy_sfp_avago, | ||
1211 | ixgbe_phy_sfp_ftl, | ||
1212 | ixgbe_phy_sfp_unknown, | ||
1213 | ixgbe_phy_generic | ||
1214 | }; | ||
1215 | |||
1216 | /* | ||
1217 | * SFP+ module type IDs: | ||
1218 | * | ||
1219 | * ID Module Type | ||
1220 | * ============= | ||
1221 | * 0 SFP_DA_CU | ||
1222 | * 1 SFP_SR | ||
1223 | * 2 SFP_LR | ||
1224 | */ | ||
1225 | enum ixgbe_sfp_type { | ||
1226 | ixgbe_sfp_type_da_cu = 0, | ||
1227 | ixgbe_sfp_type_sr = 1, | ||
1228 | ixgbe_sfp_type_lr = 2, | ||
1229 | ixgbe_sfp_type_unknown = 0xFFFF | ||
1140 | }; | 1230 | }; |
1141 | 1231 | ||
1142 | enum ixgbe_media_type { | 1232 | enum ixgbe_media_type { |
1143 | ixgbe_media_type_unknown = 0, | 1233 | ixgbe_media_type_unknown = 0, |
1144 | ixgbe_media_type_fiber, | 1234 | ixgbe_media_type_fiber, |
1145 | ixgbe_media_type_copper, | 1235 | ixgbe_media_type_copper, |
1146 | ixgbe_media_type_backplane | 1236 | ixgbe_media_type_backplane, |
1237 | ixgbe_media_type_virtual | ||
1147 | }; | 1238 | }; |
1148 | 1239 | ||
1149 | /* Flow Control Settings */ | 1240 | /* Flow Control Settings */ |
@@ -1241,59 +1332,114 @@ struct ixgbe_hw; | |||
1241 | typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, | 1332 | typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, |
1242 | u32 *vmdq); | 1333 | u32 *vmdq); |
1243 | 1334 | ||
1335 | /* Function pointer table */ | ||
1336 | struct ixgbe_eeprom_operations { | ||
1337 | s32 (*init_params)(struct ixgbe_hw *); | ||
1338 | s32 (*read)(struct ixgbe_hw *, u16, u16 *); | ||
1339 | s32 (*write)(struct ixgbe_hw *, u16, u16); | ||
1340 | s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); | ||
1341 | s32 (*update_checksum)(struct ixgbe_hw *); | ||
1342 | }; | ||
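Callers are expected to go through these tables rather than chip-specific functions; reading an EEPROM word might look like the sketch below, assuming struct ixgbe_hw embeds this info as hw->eeprom (offset illustrative):

    u16 word;

    if (hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &word) != 0)
            return IXGBE_ERR_EEPROM;        /* or propagate the status */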
1343 | |||
1244 | struct ixgbe_mac_operations { | 1344 | struct ixgbe_mac_operations { |
1245 | s32 (*reset)(struct ixgbe_hw *); | 1345 | s32 (*init_hw)(struct ixgbe_hw *); |
1346 | s32 (*reset_hw)(struct ixgbe_hw *); | ||
1347 | s32 (*start_hw)(struct ixgbe_hw *); | ||
1348 | s32 (*clear_hw_cntrs)(struct ixgbe_hw *); | ||
1246 | enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); | 1349 | enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); |
1350 | s32 (*get_supported_physical_layer)(struct ixgbe_hw *); | ||
1351 | s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); | ||
1352 | s32 (*stop_adapter)(struct ixgbe_hw *); | ||
1353 | s32 (*get_bus_info)(struct ixgbe_hw *); | ||
1354 | s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); | ||
1355 | s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); | ||
1356 | |||
1357 | /* Link */ | ||
1247 | s32 (*setup_link)(struct ixgbe_hw *); | 1358 | s32 (*setup_link)(struct ixgbe_hw *); |
1248 | s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *, bool); | 1359 | s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, |
1249 | s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool); | 1360 | bool); |
1250 | s32 (*get_link_settings)(struct ixgbe_hw *, u32 *, bool *); | 1361 | s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); |
1362 | s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, | ||
1363 | bool *); | ||
1364 | |||
1365 | /* LED */ | ||
1366 | s32 (*led_on)(struct ixgbe_hw *, u32); | ||
1367 | s32 (*led_off)(struct ixgbe_hw *, u32); | ||
1368 | s32 (*blink_led_start)(struct ixgbe_hw *, u32); | ||
1369 | s32 (*blink_led_stop)(struct ixgbe_hw *, u32); | ||
1370 | |||
1371 | /* RAR, Multicast, VLAN */ | ||
1372 | s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); | ||
1373 | s32 (*clear_rar)(struct ixgbe_hw *, u32); | ||
1374 | s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); | ||
1375 | s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); | ||
1376 | s32 (*init_rx_addrs)(struct ixgbe_hw *); | ||
1377 | s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32, | ||
1378 | ixgbe_mc_addr_itr); | ||
1379 | s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, | ||
1380 | ixgbe_mc_addr_itr); | ||
1381 | s32 (*enable_mc)(struct ixgbe_hw *); | ||
1382 | s32 (*disable_mc)(struct ixgbe_hw *); | ||
1383 | s32 (*clear_vfta)(struct ixgbe_hw *); | ||
1384 | s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); | ||
1385 | s32 (*init_uta_tables)(struct ixgbe_hw *); | ||
1386 | |||
1387 | /* Flow Control */ | ||
1388 | s32 (*setup_fc)(struct ixgbe_hw *, s32); | ||
1251 | }; | 1389 | }; |
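Link queries now also flow through the table, using the ixgbe_link_speed typedef introduced above; a sketch, assuming the trailing bool is a wait-for-link flag and that struct ixgbe_hw embeds this table as hw->mac:

    ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
    bool link_up = false;

    hw->mac.ops.check_link(hw, &speed, &link_up, false);
    if (link_up && (speed & IXGBE_LINK_SPEED_10GB_FULL)) {
            /* link negotiated at 10 Gb/s full duplex */
    }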
1252 | 1390 | ||
1253 | struct ixgbe_phy_operations { | 1391 | struct ixgbe_phy_operations { |
1392 | s32 (*identify)(struct ixgbe_hw *); | ||
1393 | s32 (*identify_sfp)(struct ixgbe_hw *); | ||
1394 | s32 (*reset)(struct ixgbe_hw *); | ||
1395 | s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); | ||
1396 | s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); | ||
1254 | s32 (*setup_link)(struct ixgbe_hw *); | 1397 | s32 (*setup_link)(struct ixgbe_hw *); |
1255 | s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *); | 1398 | s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, |
1256 | s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool); | 1399 | bool); |
1257 | }; | 1400 | s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); |
1258 | 1401 | s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); | |
1259 | struct ixgbe_mac_info { | 1402 | s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); |
1260 | struct ixgbe_mac_operations ops; | 1403 | s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); |
1261 | enum ixgbe_mac_type type; | ||
1262 | u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; | ||
1263 | u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; | ||
1264 | s32 mc_filter_type; | ||
1265 | u32 mcft_size; | ||
1266 | u32 vft_size; | ||
1267 | u32 num_rar_entries; | ||
1268 | u32 num_rx_queues; | ||
1269 | u32 num_tx_queues; | ||
1270 | u32 link_attach_type; | ||
1271 | u32 link_mode_select; | ||
1272 | bool link_settings_loaded; | ||
1273 | }; | 1404 | }; |
1274 | 1405 | ||
1275 | struct ixgbe_eeprom_info { | 1406 | struct ixgbe_eeprom_info { |
1276 | enum ixgbe_eeprom_type type; | 1407 | struct ixgbe_eeprom_operations ops; |
1277 | u16 word_size; | 1408 | enum ixgbe_eeprom_type type; |
1278 | u16 address_bits; | 1409 | u32 semaphore_delay; |
1410 | u16 word_size; | ||
1411 | u16 address_bits; | ||
1279 | }; | 1412 | }; |
1280 | 1413 | ||
1281 | struct ixgbe_phy_info { | 1414 | struct ixgbe_mac_info { |
1282 | struct ixgbe_phy_operations ops; | 1415 | struct ixgbe_mac_operations ops; |
1283 | 1416 | enum ixgbe_mac_type type; | |
1284 | enum ixgbe_phy_type type; | 1417 | u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; |
1285 | u32 addr; | 1418 | u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; |
1286 | u32 id; | 1419 | s32 mc_filter_type; |
1287 | u32 revision; | 1420 | u32 mcft_size; |
1288 | enum ixgbe_media_type media_type; | 1421 | u32 vft_size; |
1289 | u32 autoneg_advertised; | 1422 | u32 num_rar_entries; |
1290 | bool autoneg_wait_to_complete; | 1423 | u32 max_tx_queues; |
1424 | u32 max_rx_queues; | ||
1425 | u32 link_attach_type; | ||
1426 | u32 link_mode_select; | ||
1427 | bool link_settings_loaded; | ||
1428 | bool autoneg; | ||
1429 | bool autoneg_failed; | ||
1291 | }; | 1430 | }; |
1292 | 1431 | ||
1293 | struct ixgbe_info { | 1432 | struct ixgbe_phy_info { |
1294 | enum ixgbe_mac_type mac; | 1433 | struct ixgbe_phy_operations ops; |
1295 | s32 (*get_invariants)(struct ixgbe_hw *); | 1434 | enum ixgbe_phy_type type; |
1296 | struct ixgbe_mac_operations *mac_ops; | 1435 | u32 addr; |
1436 | u32 id; | ||
1437 | enum ixgbe_sfp_type sfp_type; | ||
1438 | u32 revision; | ||
1439 | enum ixgbe_media_type media_type; | ||
1440 | bool reset_disable; | ||
1441 | ixgbe_autoneg_advertised autoneg_advertised; | ||
1442 | bool autoneg_wait_to_complete; | ||
1297 | }; | 1443 | }; |
1298 | 1444 | ||
1299 | struct ixgbe_hw { | 1445 | struct ixgbe_hw { |
@@ -1312,6 +1458,15 @@ struct ixgbe_hw { | |||
1312 | bool adapter_stopped; | 1458 | bool adapter_stopped; |
1313 | }; | 1459 | }; |
1314 | 1460 | ||
1461 | struct ixgbe_info { | ||
1462 | enum ixgbe_mac_type mac; | ||
1463 | s32 (*get_invariants)(struct ixgbe_hw *); | ||
1464 | struct ixgbe_mac_operations *mac_ops; | ||
1465 | struct ixgbe_eeprom_operations *eeprom_ops; | ||
1466 | struct ixgbe_phy_operations *phy_ops; | ||
1467 | }; | ||
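Each MAC family is then expected to supply one such ixgbe_info pointing at its per-chip tables; an illustrative (not verbatim) 82598 initializer, with the table names assumed:

    static struct ixgbe_info ixgbe_82598_info = {
            .mac            = ixgbe_mac_82598EB,
            .get_invariants = &ixgbe_get_invariants_82598,
            .mac_ops        = &mac_ops_82598,
            .eeprom_ops     = &eeprom_ops_82598,
            .phy_ops        = &phy_ops_82598,
    };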
1468 | |||
1469 | |||
1315 | /* Error Codes */ | 1470 | /* Error Codes */ |
1316 | #define IXGBE_ERR_EEPROM -1 | 1471 | #define IXGBE_ERR_EEPROM -1 |
1317 | #define IXGBE_ERR_EEPROM_CHECKSUM -2 | 1472 | #define IXGBE_ERR_EEPROM_CHECKSUM -2 |
@@ -1330,6 +1485,8 @@ struct ixgbe_hw { | |||
1330 | #define IXGBE_ERR_RESET_FAILED -15 | 1485 | #define IXGBE_ERR_RESET_FAILED -15 |
1331 | #define IXGBE_ERR_SWFW_SYNC -16 | 1486 | #define IXGBE_ERR_SWFW_SYNC -16 |
1332 | #define IXGBE_ERR_PHY_ADDR_INVALID -17 | 1487 | #define IXGBE_ERR_PHY_ADDR_INVALID -17 |
1488 | #define IXGBE_ERR_I2C -18 | ||
1489 | #define IXGBE_ERR_SFP_NOT_SUPPORTED -19 | ||
1333 | #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF | 1490 | #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF |
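These negative values propagate as s32 status codes; for instance a caller of the new identify_sfp hook might special-case the SFP error (flow illustrative, assuming hw->phy holds the PHY info):

    s32 status = hw->phy.ops.identify_sfp(hw);

    if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
            return status;  /* unsupported module: do not bring link up */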
1334 | 1491 | ||
1335 | #endif /* _IXGBE_TYPE_H_ */ | 1492 | #endif /* _IXGBE_TYPE_H_ */ |