author     Len Brown <len.brown@intel.com>    2009-01-09 03:39:43 -0500
committer  Len Brown <len.brown@intel.com>    2009-01-09 03:39:43 -0500
commit     b2576e1d4408e134e2188c967b1f28af39cd79d4 (patch)
tree       004f3c82faab760f304ce031d6d2f572e7746a50 /drivers/net/ixgbe
parent     3cc8a5f4ba91f67bbdb81a43a99281a26aab8d77 (diff)
parent     2150edc6c5cf00f7adb54538b9ea2a3e9cedca3f (diff)
Merge branch 'linus' into release
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/Makefile           |   2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h            |  32
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c      | 192
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.c        | 332
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.h        | 184
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.c  | 398
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.h  |  94
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c     | 641
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c    | 134
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c       | 485
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.c        | 326
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.h        |  25
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h       |  28
13 files changed, 2743 insertions(+), 130 deletions(-)
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index ccd83d9f579e..6e7ef765bcd8 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -34,3 +34,5 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82598.o ixgbe_phy.o
+
+ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index e116d340dcc6..e112008f39c1 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -32,10 +32,11 @@
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/inet_lro.h>
+#include <linux/aer.h>
 
 #include "ixgbe_type.h"
 #include "ixgbe_common.h"
-
+#include "ixgbe_dcb.h"
 #ifdef CONFIG_IXGBE_DCA
 #include <linux/dca.h>
 #endif
@@ -84,6 +85,7 @@
 #define IXGBE_TX_FLAGS_TSO              (u32)(1 << 2)
 #define IXGBE_TX_FLAGS_IPV4             (u32)(1 << 3)
 #define IXGBE_TX_FLAGS_VLAN_MASK        0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK   0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT       16
 
 #define IXGBE_MAX_LRO_DESCRIPTORS       8
@@ -134,7 +136,7 @@ struct ixgbe_ring {
 
        u16 reg_idx; /* holds the special value that gets the hardware register
                      * offset associated with this ring, which is different
-                     * for DCE and RSS modes */
+                     * for DCB and RSS modes */
 
 #ifdef CONFIG_IXGBE_DCA
        /* cpu for tx queue */
@@ -152,8 +154,10 @@ struct ixgbe_ring {
        u16 rx_buf_len;
 };
 
+#define RING_F_DCB  0
 #define RING_F_VMDQ 1
 #define RING_F_RSS  2
+#define IXGBE_MAX_DCB_INDICES   8
 #define IXGBE_MAX_RSS_INDICES  16
 #define IXGBE_MAX_VMDQ_INDICES 16
 struct ixgbe_ring_feature {
@@ -164,6 +168,10 @@ struct ixgbe_ring_feature {
 #define MAX_RX_QUEUES 64
 #define MAX_TX_QUEUES 32
 
+#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
+                               ? 8 : 1)
+#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
+
 /* MAX_MSIX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
  */
@@ -215,6 +223,9 @@ struct ixgbe_adapter {
        struct work_struct reset_task;
        struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
        char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
+       struct ixgbe_dcb_config dcb_cfg;
+       struct ixgbe_dcb_config temp_dcb_cfg;
+       u8 dcb_set_bitmap;
 
        /* Interrupt Throttle Rate */
        u32 itr_setting;
@@ -267,8 +278,10 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG_RSS_CAPABLE                  (u32)(1 << 17)
 #define IXGBE_FLAG_VMDQ_CAPABLE                 (u32)(1 << 18)
 #define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 19)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20)
 #define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22)
 #define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1 << 23)
+#define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 24)
 
 /* default to trying for four seconds */
 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
@@ -299,12 +312,15 @@ struct ixgbe_adapter {
        unsigned long link_check_timeout;
 
        struct work_struct watchdog_task;
+       struct work_struct sfp_task;
+       struct timer_list sfp_timer;
 };
 
 enum ixbge_state_t {
        __IXGBE_TESTING,
        __IXGBE_RESETTING,
-       __IXGBE_DOWN
+       __IXGBE_DOWN,
+       __IXGBE_SFP_MODULE_NOT_FOUND
 };
 
 enum ixgbe_boards {
@@ -312,6 +328,12 @@ enum ixgbe_boards {
 };
 
 extern struct ixgbe_info ixgbe_82598_info;
+#ifdef CONFIG_IXGBE_DCB
+extern struct dcbnl_rtnl_ops dcbnl_ops;
+extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
+                              struct ixgbe_dcb_config *dst_dcb_cfg,
+                              int tc_max);
+#endif
 
 extern char ixgbe_driver_name[];
 extern const char ixgbe_driver_version[];
@@ -326,5 +348,9 @@ extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
 extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
 extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+extern void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter);
+extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+void ixgbe_napi_add_all(struct ixgbe_adapter *adapter);
+void ixgbe_napi_del_all(struct ixgbe_adapter *adapter);
 
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 7cddcfba809e..ad5699d9ab0d 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -46,6 +46,8 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, | |||
46 | ixgbe_link_speed speed, | 46 | ixgbe_link_speed speed, |
47 | bool autoneg, | 47 | bool autoneg, |
48 | bool autoneg_wait_to_complete); | 48 | bool autoneg_wait_to_complete); |
49 | static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, | ||
50 | u8 *eeprom_data); | ||
49 | 51 | ||
50 | /** | 52 | /** |
51 | */ | 53 | */ |
@@ -53,12 +55,40 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) | |||
53 | { | 55 | { |
54 | struct ixgbe_mac_info *mac = &hw->mac; | 56 | struct ixgbe_mac_info *mac = &hw->mac; |
55 | struct ixgbe_phy_info *phy = &hw->phy; | 57 | struct ixgbe_phy_info *phy = &hw->phy; |
58 | s32 ret_val = 0; | ||
59 | u16 list_offset, data_offset; | ||
56 | 60 | ||
57 | /* Call PHY identify routine to get the phy type */ | 61 | /* Call PHY identify routine to get the phy type */ |
58 | ixgbe_identify_phy_generic(hw); | 62 | ixgbe_identify_phy_generic(hw); |
59 | 63 | ||
60 | /* PHY Init */ | 64 | /* PHY Init */ |
61 | switch (phy->type) { | 65 | switch (phy->type) { |
66 | case ixgbe_phy_tn: | ||
67 | phy->ops.check_link = &ixgbe_check_phy_link_tnx; | ||
68 | phy->ops.get_firmware_version = | ||
69 | &ixgbe_get_phy_firmware_version_tnx; | ||
70 | break; | ||
71 | case ixgbe_phy_nl: | ||
72 | phy->ops.reset = &ixgbe_reset_phy_nl; | ||
73 | |||
74 | /* Call SFP+ identify routine to get the SFP+ module type */ | ||
75 | ret_val = phy->ops.identify_sfp(hw); | ||
76 | if (ret_val != 0) | ||
77 | goto out; | ||
78 | else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { | ||
79 | ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; | ||
80 | goto out; | ||
81 | } | ||
82 | |||
83 | /* Check to see if SFP+ module is supported */ | ||
84 | ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, | ||
85 | &list_offset, | ||
86 | &data_offset); | ||
87 | if (ret_val != 0) { | ||
88 | ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; | ||
89 | goto out; | ||
90 | } | ||
91 | break; | ||
62 | default: | 92 | default: |
63 | break; | 93 | break; |
64 | } | 94 | } |
@@ -77,7 +107,8 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) | |||
77 | mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; | 107 | mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; |
78 | mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; | 108 | mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; |
79 | 109 | ||
80 | return 0; | 110 | out: |
111 | return ret_val; | ||
81 | } | 112 | } |
82 | 113 | ||
83 | /** | 114 | /** |
@@ -146,9 +177,9 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, | |||
146 | * | 177 | * |
147 | * Determines the link capabilities by reading the AUTOC register. | 178 | * Determines the link capabilities by reading the AUTOC register. |
148 | **/ | 179 | **/ |
149 | s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, | 180 | static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, |
150 | ixgbe_link_speed *speed, | 181 | ixgbe_link_speed *speed, |
151 | bool *autoneg) | 182 | bool *autoneg) |
152 | { | 183 | { |
153 | s32 status = IXGBE_ERR_LINK_SETUP; | 184 | s32 status = IXGBE_ERR_LINK_SETUP; |
154 | u16 speed_ability; | 185 | u16 speed_ability; |
@@ -186,9 +217,15 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) | |||
186 | case IXGBE_DEV_ID_82598AF_SINGLE_PORT: | 217 | case IXGBE_DEV_ID_82598AF_SINGLE_PORT: |
187 | case IXGBE_DEV_ID_82598EB_CX4: | 218 | case IXGBE_DEV_ID_82598EB_CX4: |
188 | case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: | 219 | case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: |
220 | case IXGBE_DEV_ID_82598_DA_DUAL_PORT: | ||
221 | case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: | ||
189 | case IXGBE_DEV_ID_82598EB_XF_LR: | 222 | case IXGBE_DEV_ID_82598EB_XF_LR: |
223 | case IXGBE_DEV_ID_82598EB_SFP_LOM: | ||
190 | media_type = ixgbe_media_type_fiber; | 224 | media_type = ixgbe_media_type_fiber; |
191 | break; | 225 | break; |
226 | case IXGBE_DEV_ID_82598AT: | ||
227 | media_type = ixgbe_media_type_copper; | ||
228 | break; | ||
192 | default: | 229 | default: |
193 | media_type = ixgbe_media_type_unknown; | 230 | media_type = ixgbe_media_type_unknown; |
194 | break; | 231 | break; |
@@ -205,7 +242,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) | |||
205 | * Configures the flow control settings based on SW configuration. This | 242 | * Configures the flow control settings based on SW configuration. This |
206 | * function is used for 802.3x flow control configuration only. | 243 | * function is used for 802.3x flow control configuration only. |
207 | **/ | 244 | **/ |
208 | s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num) | 245 | static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num) |
209 | { | 246 | { |
210 | u32 frctl_reg; | 247 | u32 frctl_reg; |
211 | u32 rmcs_reg; | 248 | u32 rmcs_reg; |
@@ -391,6 +428,46 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, | |||
391 | { | 428 | { |
392 | u32 links_reg; | 429 | u32 links_reg; |
393 | u32 i; | 430 | u32 i; |
431 | u16 link_reg, adapt_comp_reg; | ||
432 | |||
433 | /* | ||
434 | * SERDES PHY requires us to read link status from register 0xC79F. | ||
435 | * Bit 0 set indicates link is up/ready; clear indicates link down. | ||
436 | * 0xC00C is read to check that the XAUI lanes are active. Bit 0 | ||
437 | * clear indicates active; set indicates inactive. | ||
438 | */ | ||
439 | if (hw->phy.type == ixgbe_phy_nl) { | ||
440 | hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); | ||
441 | hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); | ||
442 | hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, | ||
443 | &adapt_comp_reg); | ||
444 | if (link_up_wait_to_complete) { | ||
445 | for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { | ||
446 | if ((link_reg & 1) && | ||
447 | ((adapt_comp_reg & 1) == 0)) { | ||
448 | *link_up = true; | ||
449 | break; | ||
450 | } else { | ||
451 | *link_up = false; | ||
452 | } | ||
453 | msleep(100); | ||
454 | hw->phy.ops.read_reg(hw, 0xC79F, | ||
455 | IXGBE_TWINAX_DEV, | ||
456 | &link_reg); | ||
457 | hw->phy.ops.read_reg(hw, 0xC00C, | ||
458 | IXGBE_TWINAX_DEV, | ||
459 | &adapt_comp_reg); | ||
460 | } | ||
461 | } else { | ||
462 | if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) | ||
463 | *link_up = true; | ||
464 | else | ||
465 | *link_up = false; | ||
466 | } | ||
467 | |||
468 | if (*link_up == false) | ||
469 | goto out; | ||
470 | } | ||
394 | 471 | ||
395 | links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); | 472 | links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); |
396 | if (link_up_wait_to_complete) { | 473 | if (link_up_wait_to_complete) { |
@@ -416,6 +493,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, | |||
416 | else | 493 | else |
417 | *speed = IXGBE_LINK_SPEED_1GB_FULL; | 494 | *speed = IXGBE_LINK_SPEED_1GB_FULL; |
418 | 495 | ||
496 | out: | ||
419 | return 0; | 497 | return 0; |
420 | } | 498 | } |
421 | 499 | ||
@@ -648,7 +726,7 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) | |||
648 | * @rar: receive address register index to associate with a VMDq index | 726 | * @rar: receive address register index to associate with a VMDq index |
649 | * @vmdq: VMDq set index | 727 | * @vmdq: VMDq set index |
650 | **/ | 728 | **/ |
651 | s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) | 729 | static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) |
652 | { | 730 | { |
653 | u32 rar_high; | 731 | u32 rar_high; |
654 | 732 | ||
@@ -692,8 +770,8 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) | |||
692 | * | 770 | * |
693 | * Turn on/off specified VLAN in the VLAN filter table. | 771 | * Turn on/off specified VLAN in the VLAN filter table. |
694 | **/ | 772 | **/ |
695 | s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, | 773 | static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, |
696 | bool vlan_on) | 774 | bool vlan_on) |
697 | { | 775 | { |
698 | u32 regindex; | 776 | u32 regindex; |
699 | u32 bitindex; | 777 | u32 bitindex; |
@@ -816,7 +894,7 @@ static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index) | |||
816 | * | 894 | * |
817 | * Performs read operation to Atlas analog register specified. | 895 | * Performs read operation to Atlas analog register specified. |
818 | **/ | 896 | **/ |
819 | s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) | 897 | static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) |
820 | { | 898 | { |
821 | u32 atlas_ctl; | 899 | u32 atlas_ctl; |
822 | 900 | ||
@@ -838,7 +916,7 @@ s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) | |||
838 | * | 916 | * |
839 | * Performs write operation to Atlas analog register specified. | 917 | * Performs write operation to Atlas analog register specified. |
840 | **/ | 918 | **/ |
841 | s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) | 919 | static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) |
842 | { | 920 | { |
843 | u32 atlas_ctl; | 921 | u32 atlas_ctl; |
844 | 922 | ||
@@ -851,12 +929,75 @@ s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) | |||
851 | } | 929 | } |
852 | 930 | ||
853 | /** | 931 | /** |
932 | * ixgbe_read_i2c_eeprom_82598 - Read 8 bit EEPROM word of an SFP+ module | ||
933 | * over I2C interface through an intermediate phy. | ||
934 | * @hw: pointer to hardware structure | ||
935 | * @byte_offset: EEPROM byte offset to read | ||
936 | * @eeprom_data: value read | ||
937 | * | ||
938 | * Performs byte read operation to SFP module's EEPROM over I2C interface. | ||
939 | **/ | ||
940 | static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, | ||
941 | u8 *eeprom_data) | ||
942 | { | ||
943 | s32 status = 0; | ||
944 | u16 sfp_addr = 0; | ||
945 | u16 sfp_data = 0; | ||
946 | u16 sfp_stat = 0; | ||
947 | u32 i; | ||
948 | |||
949 | if (hw->phy.type == ixgbe_phy_nl) { | ||
950 | /* | ||
951 | * phy SDA/SCL registers are at addresses 0xC30A to | ||
952 | * 0xC30D. These registers are used to talk to the SFP+ | ||
953 | * module's EEPROM through the SDA/SCL (I2C) interface. | ||
954 | */ | ||
955 | sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset; | ||
956 | sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); | ||
957 | hw->phy.ops.write_reg(hw, | ||
958 | IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, | ||
959 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, | ||
960 | sfp_addr); | ||
961 | |||
962 | /* Poll status */ | ||
963 | for (i = 0; i < 100; i++) { | ||
964 | hw->phy.ops.read_reg(hw, | ||
965 | IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, | ||
966 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, | ||
967 | &sfp_stat); | ||
968 | sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; | ||
969 | if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) | ||
970 | break; | ||
971 | msleep(10); | ||
972 | } | ||
973 | |||
974 | if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { | ||
975 | hw_dbg(hw, "EEPROM read did not pass.\n"); | ||
976 | status = IXGBE_ERR_SFP_NOT_PRESENT; | ||
977 | goto out; | ||
978 | } | ||
979 | |||
980 | /* Read data */ | ||
981 | hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, | ||
982 | IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data); | ||
983 | |||
984 | *eeprom_data = (u8)(sfp_data >> 8); | ||
985 | } else { | ||
986 | status = IXGBE_ERR_PHY; | ||
987 | goto out; | ||
988 | } | ||
989 | |||
990 | out: | ||
991 | return status; | ||
992 | } | ||
993 | |||
994 | /** | ||
854 | * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type | 995 | * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type |
855 | * @hw: pointer to hardware structure | 996 | * @hw: pointer to hardware structure |
856 | * | 997 | * |
857 | * Determines physical layer capabilities of the current configuration. | 998 | * Determines physical layer capabilities of the current configuration. |
858 | **/ | 999 | **/ |
859 | s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) | 1000 | static s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) |
860 | { | 1001 | { |
861 | s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; | 1002 | s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; |
862 | 1003 | ||
@@ -865,13 +1006,39 @@ s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) | |||
865 | case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: | 1006 | case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: |
866 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; | 1007 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; |
867 | break; | 1008 | break; |
1009 | case IXGBE_DEV_ID_82598_DA_DUAL_PORT: | ||
1010 | physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; | ||
1011 | break; | ||
868 | case IXGBE_DEV_ID_82598AF_DUAL_PORT: | 1012 | case IXGBE_DEV_ID_82598AF_DUAL_PORT: |
869 | case IXGBE_DEV_ID_82598AF_SINGLE_PORT: | 1013 | case IXGBE_DEV_ID_82598AF_SINGLE_PORT: |
1014 | case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: | ||
870 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; | 1015 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; |
871 | break; | 1016 | break; |
872 | case IXGBE_DEV_ID_82598EB_XF_LR: | 1017 | case IXGBE_DEV_ID_82598EB_XF_LR: |
873 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; | 1018 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; |
874 | break; | 1019 | break; |
1020 | case IXGBE_DEV_ID_82598AT: | ||
1021 | physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_T | | ||
1022 | IXGBE_PHYSICAL_LAYER_1000BASE_T); | ||
1023 | break; | ||
1024 | case IXGBE_DEV_ID_82598EB_SFP_LOM: | ||
1025 | hw->phy.ops.identify_sfp(hw); | ||
1026 | |||
1027 | switch (hw->phy.sfp_type) { | ||
1028 | case ixgbe_sfp_type_da_cu: | ||
1029 | physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; | ||
1030 | break; | ||
1031 | case ixgbe_sfp_type_sr: | ||
1032 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; | ||
1033 | break; | ||
1034 | case ixgbe_sfp_type_lr: | ||
1035 | physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; | ||
1036 | break; | ||
1037 | default: | ||
1038 | physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; | ||
1039 | break; | ||
1040 | } | ||
1041 | break; | ||
875 | 1042 | ||
876 | default: | 1043 | default: |
877 | physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; | 1044 | physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; |
@@ -923,12 +1090,13 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = { | |||
923 | 1090 | ||
924 | static struct ixgbe_phy_operations phy_ops_82598 = { | 1091 | static struct ixgbe_phy_operations phy_ops_82598 = { |
925 | .identify = &ixgbe_identify_phy_generic, | 1092 | .identify = &ixgbe_identify_phy_generic, |
926 | /* .identify_sfp = &ixgbe_identify_sfp_module_generic, */ | 1093 | .identify_sfp = &ixgbe_identify_sfp_module_generic, |
927 | .reset = &ixgbe_reset_phy_generic, | 1094 | .reset = &ixgbe_reset_phy_generic, |
928 | .read_reg = &ixgbe_read_phy_reg_generic, | 1095 | .read_reg = &ixgbe_read_phy_reg_generic, |
929 | .write_reg = &ixgbe_write_phy_reg_generic, | 1096 | .write_reg = &ixgbe_write_phy_reg_generic, |
930 | .setup_link = &ixgbe_setup_phy_link_generic, | 1097 | .setup_link = &ixgbe_setup_phy_link_generic, |
931 | .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, | 1098 | .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, |
1099 | .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, | ||
932 | }; | 1100 | }; |
933 | 1101 | ||
934 | struct ixgbe_info ixgbe_82598_info = { | 1102 | struct ixgbe_info ixgbe_82598_info = { |
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
new file mode 100644
index 000000000000..e2e28ac63dec
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -0,0 +1,332 @@
1 | /******************************************************************************* | ||
2 | |||
3 | Intel 10 Gigabit PCI Express Linux driver | ||
4 | Copyright(c) 1999 - 2007 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | |||
30 | #include "ixgbe.h" | ||
31 | #include "ixgbe_type.h" | ||
32 | #include "ixgbe_dcb.h" | ||
33 | #include "ixgbe_dcb_82598.h" | ||
34 | |||
35 | /** | ||
36 | * ixgbe_dcb_config - Struct containing DCB settings. | ||
37 | * @dcb_config: Pointer to DCB config structure | ||
38 | * | ||
39 | * This function checks DCB rules for DCB settings. | ||
40 | * The following rules are checked: | ||
41 | * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%. | ||
42 | * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth | ||
43 | * Group must total 100. | ||
44 | * 3. A Traffic Class should not be set to both Link Strict Priority | ||
45 | * and Group Strict Priority. | ||
46 | * 4. Link strict Bandwidth Groups can only have link strict traffic classes | ||
47 | * with zero bandwidth. | ||
48 | */ | ||
49 | s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *dcb_config) | ||
50 | { | ||
51 | struct tc_bw_alloc *p; | ||
52 | s32 ret_val = 0; | ||
53 | u8 i, j, bw = 0, bw_id; | ||
54 | u8 bw_sum[2][MAX_BW_GROUP]; | ||
55 | bool link_strict[2][MAX_BW_GROUP]; | ||
56 | |||
57 | memset(bw_sum, 0, sizeof(bw_sum)); | ||
58 | memset(link_strict, 0, sizeof(link_strict)); | ||
59 | |||
60 | /* First Tx, then Rx */ | ||
61 | for (i = 0; i < 2; i++) { | ||
62 | /* Check each traffic class for rule violation */ | ||
63 | for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { | ||
64 | p = &dcb_config->tc_config[j].path[i]; | ||
65 | |||
66 | bw = p->bwg_percent; | ||
67 | bw_id = p->bwg_id; | ||
68 | |||
69 | if (bw_id >= MAX_BW_GROUP) { | ||
70 | ret_val = DCB_ERR_CONFIG; | ||
71 | goto err_config; | ||
72 | } | ||
73 | if (p->prio_type == prio_link) { | ||
74 | link_strict[i][bw_id] = true; | ||
75 | /* Link strict should have zero bandwidth */ | ||
76 | if (bw) { | ||
77 | ret_val = DCB_ERR_LS_BW_NONZERO; | ||
78 | goto err_config; | ||
79 | } | ||
80 | } else if (!bw) { | ||
81 | /* | ||
82 | * Traffic classes without link strict | ||
83 | * should have non-zero bandwidth. | ||
84 | */ | ||
85 | ret_val = DCB_ERR_TC_BW_ZERO; | ||
86 | goto err_config; | ||
87 | } | ||
88 | bw_sum[i][bw_id] += bw; | ||
89 | } | ||
90 | |||
91 | bw = 0; | ||
92 | |||
93 | /* Check each bandwidth group for rule violation */ | ||
94 | for (j = 0; j < MAX_BW_GROUP; j++) { | ||
95 | bw += dcb_config->bw_percentage[i][j]; | ||
96 | /* | ||
97 | * Sum of bandwidth percentages of all traffic classes | ||
98 | * within a Bandwidth Group must total 100 except for | ||
99 | * link strict group (zero bandwidth). | ||
100 | */ | ||
101 | if (link_strict[i][j]) { | ||
102 | if (bw_sum[i][j]) { | ||
103 | /* | ||
104 | * Link strict group should have zero | ||
105 | * bandwidth. | ||
106 | */ | ||
107 | ret_val = DCB_ERR_LS_BWG_NONZERO; | ||
108 | goto err_config; | ||
109 | } | ||
110 | } else if (bw_sum[i][j] != BW_PERCENT && | ||
111 | bw_sum[i][j] != 0) { | ||
112 | ret_val = DCB_ERR_TC_BW; | ||
113 | goto err_config; | ||
114 | } | ||
115 | } | ||
116 | |||
117 | if (bw != BW_PERCENT) { | ||
118 | ret_val = DCB_ERR_BW_GROUP; | ||
119 | goto err_config; | ||
120 | } | ||
121 | } | ||
122 | |||
123 | err_config: | ||
124 | return ret_val; | ||
125 | } | ||
126 | |||
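A quick standalone sketch (not part of the patch, illustrative values only) of an allocation that satisfies rules 1 and 2 above: two bandwidth groups splitting the link 60/40, with the traffic classes inside each group summing to 100.

#include <assert.h>
#define MAX_BW_GROUP 8
int main(void)
{
	/* group shares of the link (rule 1: must sum to 100) */
	unsigned bw_percentage[MAX_BW_GROUP] = { 60, 40, 0, 0, 0, 0, 0, 0 };
	/* tc_bwg[i] = bandwidth group of TC i, tc_pct[i] = TC i's share of it */
	unsigned tc_bwg[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };
	unsigned tc_pct[8] = { 25, 25, 25, 25, 40, 30, 20, 10 };
	unsigned link_total = 0, group_total[MAX_BW_GROUP] = { 0 };

	for (int g = 0; g < MAX_BW_GROUP; g++)
		link_total += bw_percentage[g];
	for (int tc = 0; tc < 8; tc++)
		group_total[tc_bwg[tc]] += tc_pct[tc];

	assert(link_total == 100);	/* rule 1 */
	assert(group_total[0] == 100);	/* rule 2, group 0 */
	assert(group_total[1] == 100);	/* rule 2, group 1 */
	return 0;
}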
127 | /** | ||
128 | * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits | ||
129 | * @ixgbe_dcb_config: Struct containing DCB settings. | ||
130 | * @direction: Configuring either Tx or Rx. | ||
131 | * | ||
132 | * This function calculates the credits allocated to each traffic class. | ||
133 | * It should be called only after the rules are checked by | ||
134 | * ixgbe_dcb_check_config(). | ||
135 | */ | ||
136 | s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, | ||
137 | u8 direction) | ||
138 | { | ||
139 | struct tc_bw_alloc *p; | ||
140 | s32 ret_val = 0; | ||
141 | /* Initialization values default for Tx settings */ | ||
142 | u32 credit_refill = 0; | ||
143 | u32 credit_max = 0; | ||
144 | u16 link_percentage = 0; | ||
145 | u8 bw_percent = 0; | ||
146 | u8 i; | ||
147 | |||
148 | if (dcb_config == NULL) { | ||
149 | ret_val = DCB_ERR_CONFIG; | ||
150 | goto out; | ||
151 | } | ||
152 | |||
153 | /* Find out the link percentage for each TC first */ | ||
154 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { | ||
155 | p = &dcb_config->tc_config[i].path[direction]; | ||
156 | bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; | ||
157 | |||
158 | link_percentage = p->bwg_percent; | ||
159 | /* Must be careful of integer division for very small nums */ | ||
160 | link_percentage = (link_percentage * bw_percent) / 100; | ||
161 | if (p->bwg_percent > 0 && link_percentage == 0) | ||
162 | link_percentage = 1; | ||
163 | |||
164 | /* Save link_percentage for reference */ | ||
165 | p->link_percent = (u8)link_percentage; | ||
166 | |||
167 | /* Calculate credit refill and save it */ | ||
168 | credit_refill = link_percentage * MINIMUM_CREDIT_REFILL; | ||
169 | p->data_credits_refill = (u16)credit_refill; | ||
170 | |||
171 | /* Calculate maximum credit for the TC */ | ||
172 | credit_max = (link_percentage * MAX_CREDIT) / 100; | ||
173 | |||
174 | /* | ||
175 | * Adjustment based on rule checking, if the percentage | ||
176 | * of a TC is too small, the maximum credit may not be | ||
177 | * enough to send out a jumbo frame in data plane arbitration. | ||
178 | */ | ||
179 | if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO)) | ||
180 | credit_max = MINIMUM_CREDIT_FOR_JUMBO; | ||
181 | |||
182 | if (direction == DCB_TX_CONFIG) { | ||
183 | /* | ||
184 | * Adjustment based on rule checking, if the | ||
185 | * percentage of a TC is too small, the maximum | ||
186 | * credit may not be enough to send out a TSO | ||
187 | * packet in descriptor plane arbitration. | ||
188 | */ | ||
189 | if (credit_max && | ||
190 | (credit_max < MINIMUM_CREDIT_FOR_TSO)) | ||
191 | credit_max = MINIMUM_CREDIT_FOR_TSO; | ||
192 | |||
193 | dcb_config->tc_config[i].desc_credits_max = | ||
194 | (u16)credit_max; | ||
195 | } | ||
196 | |||
197 | p->data_credits_max = (u16)credit_max; | ||
198 | } | ||
199 | |||
200 | out: | ||
201 | return ret_val; | ||
202 | } | ||
203 | |||
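A standalone model of the credit arithmetic above (illustrative numbers, not driver code): for a traffic class that owns 50% of a bandwidth group which in turn owns 40% of the link, link_percentage comes out at 20, the refill value at 100 credits (6400 bytes at 64 bytes per credit), and the maximum credit at 819, which already clears both the jumbo and TSO floors.

#include <stdio.h>

#define MINIMUM_CREDIT_REFILL      5
#define MINIMUM_CREDIT_FOR_JUMBO 145
#define MINIMUM_CREDIT_FOR_TSO   513
#define MAX_CREDIT              4095

int main(void)
{
	unsigned bwg_percent = 50;	/* TC's share of its bandwidth group */
	unsigned bw_percent  = 40;	/* group's share of the link */

	unsigned link_percentage = (bwg_percent * bw_percent) / 100;
	if (bwg_percent > 0 && link_percentage == 0)
		link_percentage = 1;	/* don't round a small TC down to zero */

	unsigned refill = link_percentage * MINIMUM_CREDIT_REFILL;
	unsigned max    = (link_percentage * MAX_CREDIT) / 100;
	if (max && max < MINIMUM_CREDIT_FOR_JUMBO)
		max = MINIMUM_CREDIT_FOR_JUMBO;	/* data-plane jumbo floor */
	if (max && max < MINIMUM_CREDIT_FOR_TSO)
		max = MINIMUM_CREDIT_FOR_TSO;	/* Tx descriptor-plane TSO floor */

	/* prints: link 20%, refill 100 credits, max 819 credits */
	printf("link %u%%, refill %u credits, max %u credits\n",
	       link_percentage, refill, max);
	return 0;
}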
204 | /** | ||
205 | * ixgbe_dcb_get_tc_stats - Returns status of each traffic class | ||
206 | * @hw: pointer to hardware structure | ||
207 | * @stats: pointer to statistics structure | ||
208 | * @tc_count: Number of elements in bwg_array. | ||
209 | * | ||
210 | * This function returns the status data for each of the Traffic Classes in use. | ||
211 | */ | ||
212 | s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, | ||
213 | u8 tc_count) | ||
214 | { | ||
215 | s32 ret = 0; | ||
216 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
217 | ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count); | ||
218 | return ret; | ||
219 | } | ||
220 | |||
221 | /** | ||
222 | * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class | ||
223 | * hw - pointer to hardware structure | ||
224 | * stats - pointer to statistics structure | ||
225 | * tc_count - Number of elements in bwg_array. | ||
226 | * | ||
227 | * This function returns the CBFC status data for each of the Traffic Classes. | ||
228 | */ | ||
229 | s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, | ||
230 | u8 tc_count) | ||
231 | { | ||
232 | s32 ret = 0; | ||
233 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
234 | ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count); | ||
235 | return ret; | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * ixgbe_dcb_config_rx_arbiter - Config Rx arbiter | ||
240 | * @hw: pointer to hardware structure | ||
241 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
242 | * | ||
243 | * Configure Rx Data Arbiter and credits for each traffic class. | ||
244 | */ | ||
245 | s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw, | ||
246 | struct ixgbe_dcb_config *dcb_config) | ||
247 | { | ||
248 | s32 ret = 0; | ||
249 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
250 | ret = ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config); | ||
251 | return ret; | ||
252 | } | ||
253 | |||
254 | /** | ||
255 | * ixgbe_dcb_config_tx_desc_arbiter - Config Tx Desc arbiter | ||
256 | * @hw: pointer to hardware structure | ||
257 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
258 | * | ||
259 | * Configure Tx Descriptor Arbiter and credits for each traffic class. | ||
260 | */ | ||
261 | s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw, | ||
262 | struct ixgbe_dcb_config *dcb_config) | ||
263 | { | ||
264 | s32 ret = 0; | ||
265 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
266 | ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config); | ||
267 | return ret; | ||
268 | } | ||
269 | |||
270 | /** | ||
271 | * ixgbe_dcb_config_tx_data_arbiter - Config Tx data arbiter | ||
272 | * @hw: pointer to hardware structure | ||
273 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
274 | * | ||
275 | * Configure Tx Data Arbiter and credits for each traffic class. | ||
276 | */ | ||
277 | s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw, | ||
278 | struct ixgbe_dcb_config *dcb_config) | ||
279 | { | ||
280 | s32 ret = 0; | ||
281 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
282 | ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config); | ||
283 | return ret; | ||
284 | } | ||
285 | |||
286 | /** | ||
287 | * ixgbe_dcb_config_pfc - Config priority flow control | ||
288 | * @hw: pointer to hardware structure | ||
289 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
290 | * | ||
291 | * Configure Priority Flow Control for each traffic class. | ||
292 | */ | ||
293 | s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, | ||
294 | struct ixgbe_dcb_config *dcb_config) | ||
295 | { | ||
296 | s32 ret = 0; | ||
297 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
298 | ret = ixgbe_dcb_config_pfc_82598(hw, dcb_config); | ||
299 | return ret; | ||
300 | } | ||
301 | |||
302 | /** | ||
303 | * ixgbe_dcb_config_tc_stats - Config traffic class statistics | ||
304 | * @hw: pointer to hardware structure | ||
305 | * | ||
306 | * Configure queue statistics registers, all queues belonging to same traffic | ||
307 | * class uses a single set of queue statistics counters. | ||
308 | */ | ||
309 | s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw) | ||
310 | { | ||
311 | s32 ret = 0; | ||
312 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
313 | ret = ixgbe_dcb_config_tc_stats_82598(hw); | ||
314 | return ret; | ||
315 | } | ||
316 | |||
317 | /** | ||
318 | * ixgbe_dcb_hw_config - Config and enable DCB | ||
319 | * @hw: pointer to hardware structure | ||
320 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
321 | * | ||
322 | * Configure dcb settings and enable dcb mode. | ||
323 | */ | ||
324 | s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, | ||
325 | struct ixgbe_dcb_config *dcb_config) | ||
326 | { | ||
327 | s32 ret = 0; | ||
328 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
329 | ret = ixgbe_dcb_hw_config_82598(hw, dcb_config); | ||
330 | return ret; | ||
331 | } | ||
332 | |||
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
new file mode 100644
index 000000000000..75f6efe1e369
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -0,0 +1,184 @@
1 | /******************************************************************************* | ||
2 | |||
3 | Intel 10 Gigabit PCI Express Linux driver | ||
4 | Copyright(c) 1999 - 2007 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #ifndef _DCB_CONFIG_H_ | ||
30 | #define _DCB_CONFIG_H_ | ||
31 | |||
32 | #include "ixgbe_type.h" | ||
33 | |||
34 | /* DCB data structures */ | ||
35 | |||
36 | #define IXGBE_MAX_PACKET_BUFFERS 8 | ||
37 | #define MAX_USER_PRIORITY 8 | ||
38 | #define MAX_TRAFFIC_CLASS 8 | ||
39 | #define MAX_BW_GROUP 8 | ||
40 | #define BW_PERCENT 100 | ||
41 | |||
42 | #define DCB_TX_CONFIG 0 | ||
43 | #define DCB_RX_CONFIG 1 | ||
44 | |||
45 | /* DCB error Codes */ | ||
46 | #define DCB_SUCCESS 0 | ||
47 | #define DCB_ERR_CONFIG -1 | ||
48 | #define DCB_ERR_PARAM -2 | ||
49 | |||
50 | /* Transmit and receive Errors */ | ||
51 | /* Error in bandwidth group allocation */ | ||
52 | #define DCB_ERR_BW_GROUP -3 | ||
53 | /* Error in traffic class bandwidth allocation */ | ||
54 | #define DCB_ERR_TC_BW -4 | ||
55 | /* Traffic class has both link strict and group strict enabled */ | ||
56 | #define DCB_ERR_LS_GS -5 | ||
57 | /* Link strict traffic class has non zero bandwidth */ | ||
58 | #define DCB_ERR_LS_BW_NONZERO -6 | ||
59 | /* Link strict bandwidth group has non zero bandwidth */ | ||
60 | #define DCB_ERR_LS_BWG_NONZERO -7 | ||
61 | /* Traffic class has zero bandwidth */ | ||
62 | #define DCB_ERR_TC_BW_ZERO -8 | ||
63 | |||
64 | #define DCB_NOT_IMPLEMENTED 0x7FFFFFFF | ||
65 | |||
66 | struct dcb_pfc_tc_debug { | ||
67 | u8 tc; | ||
68 | u8 pause_status; | ||
69 | u64 pause_quanta; | ||
70 | }; | ||
71 | |||
72 | enum strict_prio_type { | ||
73 | prio_none = 0, | ||
74 | prio_group, | ||
75 | prio_link | ||
76 | }; | ||
77 | |||
78 | /* Traffic class bandwidth allocation per direction */ | ||
79 | struct tc_bw_alloc { | ||
80 | u8 bwg_id; /* Bandwidth Group (BWG) ID */ | ||
81 | u8 bwg_percent; /* % of BWG's bandwidth */ | ||
82 | u8 link_percent; /* % of link bandwidth */ | ||
83 | u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ | ||
84 | u16 data_credits_refill; /* Credit refill amount in 64B granularity */ | ||
85 | u16 data_credits_max; /* Max credits for a configured packet buffer | ||
86 | * in 64B granularity.*/ | ||
87 | enum strict_prio_type prio_type; /* Link or Group Strict Priority */ | ||
88 | }; | ||
89 | |||
90 | enum dcb_pfc_type { | ||
91 | pfc_disabled = 0, | ||
92 | pfc_enabled_full, | ||
93 | pfc_enabled_tx, | ||
94 | pfc_enabled_rx | ||
95 | }; | ||
96 | |||
97 | /* Traffic class configuration */ | ||
98 | struct tc_configuration { | ||
99 | struct tc_bw_alloc path[2]; /* One each for Tx/Rx */ | ||
100 | enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */ | ||
101 | |||
102 | u16 desc_credits_max; /* For Tx Descriptor arbitration */ | ||
103 | u8 tc; /* Traffic class (TC) */ | ||
104 | }; | ||
105 | |||
106 | enum dcb_rx_pba_cfg { | ||
107 | pba_equal, /* PBA[0-7] each use 64KB FIFO */ | ||
108 | pba_80_48 /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */ | ||
109 | }; | ||
110 | |||
111 | /* | ||
112 | * This structure contains many values encoded as fixed-point | ||
113 | * numbers, meaning that some of bits are dedicated to the | ||
114 | * magnitude and others to the fraction part. In the comments | ||
115 | * this is shown as f=n, where n is the number of fraction bits. | ||
116 | * These fraction bits are always the low-order bits. The size | ||
117 | * of the magnitude is not specified. | ||
118 | */ | ||
119 | struct bcn_config { | ||
120 | u32 rp_admin_mode[MAX_TRAFFIC_CLASS]; /* BCN enabled, per TC */ | ||
121 | u32 bcna_option[2]; /* BCNA Port + MAC Addr */ | ||
122 | u32 rp_w; /* Derivative Weight, f=3 */ | ||
123 | u32 rp_gi; /* Increase Gain, f=12 */ | ||
124 | u32 rp_gd; /* Decrease Gain, f=12 */ | ||
125 | u32 rp_ru; /* Rate Unit */ | ||
126 | u32 rp_alpha; /* Max Decrease Factor, f=12 */ | ||
127 | u32 rp_beta; /* Max Increase Factor, f=12 */ | ||
128 | u32 rp_ri; /* Initial Rate */ | ||
129 | u32 rp_td; /* Drift Interval Timer */ | ||
130 | u32 rp_rd; /* Drift Increase */ | ||
131 | u32 rp_tmax; /* Severe Congestion Backoff Timer Range */ | ||
132 | u32 rp_rmin; /* Severe Congestion Restart Rate */ | ||
133 | u32 rp_wrtt; /* RTT Moving Average Weight */ | ||
134 | }; | ||
135 | |||
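A small numeric illustration of the f=12 fixed-point encoding described above (purely illustrative values): the low 12 bits hold the fraction, so a stored value v represents v / 4096.

#include <stdio.h>
int main(void)
{
	unsigned v = 0x0800;			/* example register value, f=12 */
	printf("%u -> %.3f\n", v, v / 4096.0);	/* prints: 2048 -> 0.500 */
	return 0;
}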
136 | struct ixgbe_dcb_config { | ||
137 | struct bcn_config bcn; | ||
138 | |||
139 | struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; | ||
140 | u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ | ||
141 | |||
142 | bool round_robin_enable; | ||
143 | |||
144 | enum dcb_rx_pba_cfg rx_pba_cfg; | ||
145 | |||
146 | u32 dcb_cfg_version; /* Not used...OS-specific? */ | ||
147 | u32 link_speed; /* For bandwidth allocation validation purpose */ | ||
148 | }; | ||
149 | |||
150 | /* DCB driver APIs */ | ||
151 | |||
152 | /* DCB rule checking function.*/ | ||
153 | s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *config); | ||
154 | |||
155 | /* DCB credits calculation */ | ||
156 | s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8); | ||
157 | |||
158 | /* DCB PFC functions */ | ||
159 | s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, struct ixgbe_dcb_config *g); | ||
160 | s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); | ||
161 | |||
162 | /* DCB traffic class stats */ | ||
163 | s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *); | ||
164 | s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); | ||
165 | |||
166 | /* DCB config arbiters */ | ||
167 | s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *, | ||
168 | struct ixgbe_dcb_config *); | ||
169 | s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *, | ||
170 | struct ixgbe_dcb_config *); | ||
171 | s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *, struct ixgbe_dcb_config *); | ||
172 | |||
173 | /* DCB hw initialization */ | ||
174 | s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); | ||
175 | |||
176 | /* DCB definitions for credit calculation */ | ||
177 | #define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ | ||
178 | #define MINIMUM_CREDIT_REFILL 5 /* 5*64B = 320B */ | ||
179 | #define MINIMUM_CREDIT_FOR_JUMBO 145 /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */ | ||
180 | #define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */ | ||
181 | #define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */ | ||
182 | #define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */ | ||
183 | |||
184 | #endif /* _DCB_CONFIG_H */ | ||
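The jumbo and TSO credit floors above follow directly from the 64-byte credit granularity; a standalone check of the arithmetic (the 54-byte overhead figure is taken from the comment above, not verified independently):

#include <assert.h>
int main(void)
{
	int jumbo_bytes   = 9 * 1024 + 54;		/* 9 KB frame + overhead */
	int jumbo_credits = (jumbo_bytes + 63) / 64;	/* round up to 64B credits */
	int tso_credits   = (32 * 1024) / 64 + 1;	/* 32 KB TSO burst */

	assert(jumbo_credits == 145);	/* MINIMUM_CREDIT_FOR_JUMBO */
	assert(tso_credits == 513);	/* MINIMUM_CREDIT_FOR_TSO */
	return 0;
}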
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
new file mode 100644
index 000000000000..2c046b0b5d28
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -0,0 +1,398 @@
1 | /******************************************************************************* | ||
2 | |||
3 | Intel 10 Gigabit PCI Express Linux driver | ||
4 | Copyright(c) 1999 - 2007 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #include "ixgbe.h" | ||
30 | #include "ixgbe_type.h" | ||
31 | #include "ixgbe_dcb.h" | ||
32 | #include "ixgbe_dcb_82598.h" | ||
33 | |||
34 | /** | ||
35 | * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class | ||
36 | * @hw: pointer to hardware structure | ||
37 | * @stats: pointer to statistics structure | ||
38 | * @tc_count: Number of elements in bwg_array. | ||
39 | * | ||
40 | * This function returns the status data for each of the Traffic Classes in use. | ||
41 | */ | ||
42 | s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw, | ||
43 | struct ixgbe_hw_stats *stats, | ||
44 | u8 tc_count) | ||
45 | { | ||
46 | int tc; | ||
47 | |||
48 | if (tc_count > MAX_TRAFFIC_CLASS) | ||
49 | return DCB_ERR_PARAM; | ||
50 | |||
51 | /* Statistics pertaining to each traffic class */ | ||
52 | for (tc = 0; tc < tc_count; tc++) { | ||
53 | /* Transmitted Packets */ | ||
54 | stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); | ||
55 | /* Transmitted Bytes */ | ||
56 | stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc)); | ||
57 | /* Received Packets */ | ||
58 | stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); | ||
59 | /* Received Bytes */ | ||
60 | stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc)); | ||
61 | } | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | /** | ||
67 | * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data | ||
68 | * @hw: pointer to hardware structure | ||
69 | * @stats: pointer to statistics structure | ||
70 | * @tc_count: Number of elements in bwg_array. | ||
71 | * | ||
72 | * This function returns the CBFC status data for each of the Traffic Classes. | ||
73 | */ | ||
74 | s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw, | ||
75 | struct ixgbe_hw_stats *stats, | ||
76 | u8 tc_count) | ||
77 | { | ||
78 | int tc; | ||
79 | |||
80 | if (tc_count > MAX_TRAFFIC_CLASS) | ||
81 | return DCB_ERR_PARAM; | ||
82 | |||
83 | for (tc = 0; tc < tc_count; tc++) { | ||
84 | /* Priority XOFF Transmitted */ | ||
85 | stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); | ||
86 | /* Priority XOFF Received */ | ||
87 | stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc)); | ||
88 | } | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | /** | ||
94 | * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers | ||
95 | * @hw: pointer to hardware structure | ||
96 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
97 | * | ||
98 | * Configure packet buffers for DCB mode. | ||
99 | */ | ||
100 | static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, | ||
101 | struct ixgbe_dcb_config *dcb_config) | ||
102 | { | ||
103 | s32 ret_val = 0; | ||
104 | u32 value = IXGBE_RXPBSIZE_64KB; | ||
105 | u8 i = 0; | ||
106 | |||
107 | /* Setup Rx packet buffer sizes */ | ||
108 | switch (dcb_config->rx_pba_cfg) { | ||
109 | case pba_80_48: | ||
110 | /* Setup the first four at 80KB */ | ||
111 | value = IXGBE_RXPBSIZE_80KB; | ||
112 | for (; i < 4; i++) | ||
113 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value); | ||
114 | /* Setup the last four at 48KB...don't re-init i */ | ||
115 | value = IXGBE_RXPBSIZE_48KB; | ||
116 | /* Fall Through */ | ||
117 | case pba_equal: | ||
118 | default: | ||
119 | for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) | ||
120 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value); | ||
121 | |||
122 | /* Setup Tx packet buffer sizes */ | ||
123 | for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { | ||
124 | IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), | ||
125 | IXGBE_TXPBSIZE_40KB); | ||
126 | } | ||
127 | break; | ||
128 | } | ||
129 | |||
130 | return ret_val; | ||
131 | } | ||
132 | |||
133 | /** | ||
134 | * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter | ||
135 | * @hw: pointer to hardware structure | ||
136 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
137 | * | ||
138 | * Configure Rx Data Arbiter and credits for each traffic class. | ||
139 | */ | ||
140 | s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, | ||
141 | struct ixgbe_dcb_config *dcb_config) | ||
142 | { | ||
143 | struct tc_bw_alloc *p; | ||
144 | u32 reg = 0; | ||
145 | u32 credit_refill = 0; | ||
146 | u32 credit_max = 0; | ||
147 | u8 i = 0; | ||
148 | |||
149 | reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; | ||
150 | IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); | ||
151 | |||
152 | reg = IXGBE_READ_REG(hw, IXGBE_RMCS); | ||
153 | /* Enable Arbiter */ | ||
154 | reg &= ~IXGBE_RMCS_ARBDIS; | ||
155 | /* Enable Receive Recycle within the BWG */ | ||
156 | reg |= IXGBE_RMCS_RRM; | ||
157 | /* Enable Deficit Fixed Priority arbitration*/ | ||
158 | reg |= IXGBE_RMCS_DFP; | ||
159 | |||
160 | IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); | ||
161 | |||
162 | /* Configure traffic class credits and priority */ | ||
163 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { | ||
164 | p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; | ||
165 | credit_refill = p->data_credits_refill; | ||
166 | credit_max = p->data_credits_max; | ||
167 | |||
168 | reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); | ||
169 | |||
170 | if (p->prio_type == prio_link) | ||
171 | reg |= IXGBE_RT2CR_LSP; | ||
172 | |||
173 | IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); | ||
174 | } | ||
175 | |||
176 | reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); | ||
177 | reg |= IXGBE_RDRXCTL_RDMTS_1_2; | ||
178 | reg |= IXGBE_RDRXCTL_MPBEN; | ||
179 | reg |= IXGBE_RDRXCTL_MCEN; | ||
180 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); | ||
181 | |||
182 | reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | ||
183 | /* Make sure there is enough descriptors before arbitration */ | ||
184 | reg &= ~IXGBE_RXCTRL_DMBYPS; | ||
185 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter | ||
192 | * @hw: pointer to hardware structure | ||
193 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
194 | * | ||
195 | * Configure Tx Descriptor Arbiter and credits for each traffic class. | ||
196 | */ | ||
197 | s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, | ||
198 | struct ixgbe_dcb_config *dcb_config) | ||
199 | { | ||
200 | struct tc_bw_alloc *p; | ||
201 | u32 reg, max_credits; | ||
202 | u8 i; | ||
203 | |||
204 | reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); | ||
205 | |||
206 | /* Enable arbiter */ | ||
207 | reg &= ~IXGBE_DPMCS_ARBDIS; | ||
208 | if (!(dcb_config->round_robin_enable)) { | ||
209 | /* Enable DFP and Recycle mode */ | ||
210 | reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM); | ||
211 | } | ||
212 | reg |= IXGBE_DPMCS_TSOEF; | ||
213 | /* Configure Max TSO packet size 34KB including payload and headers */ | ||
214 | reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); | ||
215 | |||
216 | IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); | ||
217 | |||
218 | /* Configure traffic class credits and priority */ | ||
219 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { | ||
220 | p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; | ||
221 | max_credits = dcb_config->tc_config[i].desc_credits_max; | ||
222 | reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; | ||
223 | reg |= p->data_credits_refill; | ||
224 | reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT; | ||
225 | |||
226 | if (p->prio_type == prio_group) | ||
227 | reg |= IXGBE_TDTQ2TCCR_GSP; | ||
228 | |||
229 | if (p->prio_type == prio_link) | ||
230 | reg |= IXGBE_TDTQ2TCCR_LSP; | ||
231 | |||
232 | IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); | ||
233 | } | ||
234 | |||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter | ||
240 | * @hw: pointer to hardware structure | ||
241 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
242 | * | ||
243 | * Configure Tx Data Arbiter and credits for each traffic class. | ||
244 | */ | ||
245 | s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, | ||
246 | struct ixgbe_dcb_config *dcb_config) | ||
247 | { | ||
248 | struct tc_bw_alloc *p; | ||
249 | u32 reg; | ||
250 | u8 i; | ||
251 | |||
252 | reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS); | ||
253 | /* Enable Data Plane Arbiter */ | ||
254 | reg &= ~IXGBE_PDPMCS_ARBDIS; | ||
255 | /* Enable DFP and Transmit Recycle Mode */ | ||
256 | reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM); | ||
257 | |||
258 | IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); | ||
259 | |||
260 | /* Configure traffic class credits and priority */ | ||
261 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { | ||
262 | p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; | ||
263 | reg = p->data_credits_refill; | ||
264 | reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT; | ||
265 | reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT; | ||
266 | |||
267 | if (p->prio_type == prio_group) | ||
268 | reg |= IXGBE_TDPT2TCCR_GSP; | ||
269 | |||
270 | if (p->prio_type == prio_link) | ||
271 | reg |= IXGBE_TDPT2TCCR_LSP; | ||
272 | |||
273 | IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); | ||
274 | } | ||
275 | |||
276 | /* Enable Tx packet buffer division */ | ||
277 | reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL); | ||
278 | reg |= IXGBE_DTXCTL_ENDBUBD; | ||
279 | IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); | ||
280 | |||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | /** | ||
285 | * ixgbe_dcb_config_pfc_82598 - Config priority flow control | ||
286 | * @hw: pointer to hardware structure | ||
287 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
288 | * | ||
289 | * Configure Priority Flow Control for each traffic class. | ||
290 | */ | ||
291 | s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, | ||
292 | struct ixgbe_dcb_config *dcb_config) | ||
293 | { | ||
294 | u32 reg, rx_pba_size; | ||
295 | u8 i; | ||
296 | |||
297 | /* Enable Transmit Priority Flow Control */ | ||
298 | reg = IXGBE_READ_REG(hw, IXGBE_RMCS); | ||
299 | reg &= ~IXGBE_RMCS_TFCE_802_3X; | ||
300 | /* correct the reporting of our flow control status */ | ||
301 | hw->fc.type = ixgbe_fc_none; | ||
302 | reg |= IXGBE_RMCS_TFCE_PRIORITY; | ||
303 | IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); | ||
304 | |||
305 | /* Enable Receive Priority Flow Control */ | ||
306 | reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); | ||
307 | reg &= ~IXGBE_FCTRL_RFCE; | ||
308 | reg |= IXGBE_FCTRL_RPFCE; | ||
309 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); | ||
310 | |||
311 | /* | ||
312 | * Configure flow control thresholds and enable priority flow control | ||
313 | * for each traffic class. | ||
314 | */ | ||
315 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { | ||
316 | if (dcb_config->rx_pba_cfg == pba_equal) { | ||
317 | rx_pba_size = IXGBE_RXPBSIZE_64KB; | ||
318 | } else { | ||
319 | rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB | ||
320 | : IXGBE_RXPBSIZE_48KB; | ||
321 | } | ||
322 | |||
323 | reg = ((rx_pba_size >> 5) & 0xFFF0); | ||
324 | if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || | ||
325 | dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) | ||
326 | reg |= IXGBE_FCRTL_XONE; | ||
327 | |||
328 | IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); | ||
329 | |||
330 | reg = ((rx_pba_size >> 2) & 0xFFF0); | ||
331 | if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || | ||
332 | dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) | ||
333 | reg |= IXGBE_FCRTH_FCEN; | ||
334 | |||
335 | IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); | ||
336 | } | ||
337 | |||
338 | /* Configure pause time */ | ||
339 | for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++) | ||
340 | IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800); | ||
341 | |||
342 | /* Configure flow control refresh threshold value */ | ||
343 | IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400); | ||
344 | |||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | /** | ||
349 | * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics | ||
350 | * @hw: pointer to hardware structure | ||
351 | * | ||
352 | * Configure queue statistics registers; all queues belonging to the same | ||
353 | * traffic class use a single set of queue statistics counters. | ||
354 | */ | ||
355 | s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) | ||
356 | { | ||
357 | u32 reg = 0; | ||
358 | u8 i = 0; | ||
359 | u8 j = 0; | ||
360 | |||
361 | /* Receive Queues stats setting - 8 queues per statistics reg */ | ||
362 | for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { | ||
363 | reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i)); | ||
364 | reg |= ((0x1010101) * j); | ||
365 | IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); | ||
366 | reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1)); | ||
367 | reg |= ((0x1010101) * j); | ||
368 | IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); | ||
369 | } | ||
370 | /* Transmit Queues stats setting - 4 queues per statistics reg */ | ||
371 | for (i = 0; i < 8; i++) { | ||
372 | reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); | ||
373 | reg |= ((0x1010101) * i); | ||
374 | IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); | ||
375 | } | ||
376 | |||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | /** | ||
381 | * ixgbe_dcb_hw_config_82598 - Config and enable DCB | ||
382 | * @hw: pointer to hardware structure | ||
383 | * @dcb_config: pointer to ixgbe_dcb_config structure | ||
384 | * | ||
385 | * Configure DCB settings and enable DCB mode. | ||
386 | */ | ||
387 | s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, | ||
388 | struct ixgbe_dcb_config *dcb_config) | ||
389 | { | ||
390 | ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config); | ||
391 | ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config); | ||
392 | ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config); | ||
393 | ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config); | ||
394 | ixgbe_dcb_config_pfc_82598(hw, dcb_config); | ||
395 | ixgbe_dcb_config_tc_stats_82598(hw); | ||
396 | |||
397 | return 0; | ||
398 | } | ||
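ixgbe_dcb_hw_config_82598() above chains the per-block steps (packet buffers, Rx/Tx arbiters, PFC, statistics). The device-independent wrapper that selects it lives in ixgbe_dcb.c, which is not shown in this hunk; the sketch below only illustrates the assumed dispatch by MAC type, with the wrapper name and the ixgbe_mac_82598EB enumerator treated as assumptions rather than quotes from this hunk.

    /* Sketch, not part of this patch: assumed shape of the generic
     * dispatcher in ixgbe_dcb.c that reaches the 82598 routine above. */
    s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
                            struct ixgbe_dcb_config *dcb_config)
    {
            s32 ret = 0;

            switch (hw->mac.type) {
            case ixgbe_mac_82598EB:
                    /* program packet buffers, arbiters, PFC and stats */
                    ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
                    break;
            default:
                    break;
            }
            return ret;
    }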
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h new file mode 100644 index 000000000000..1e6a313719d7 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h | |||
@@ -0,0 +1,94 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel 10 Gigabit PCI Express Linux driver | ||
4 | Copyright(c) 1999 - 2007 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #ifndef _DCB_82598_CONFIG_H_ | ||
30 | #define _DCB_82598_CONFIG_H_ | ||
31 | |||
32 | /* DCB register definitions */ | ||
33 | |||
34 | #define IXGBE_DPMCS_MTSOS_SHIFT 16 | ||
35 | #define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */ | ||
36 | #define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ | ||
37 | #define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ | ||
38 | #define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ | ||
39 | |||
40 | #define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ | ||
41 | |||
42 | #define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ | ||
43 | #define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ | ||
44 | |||
45 | #define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */ | ||
46 | #define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */ | ||
47 | |||
48 | #define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 | ||
49 | #define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 | ||
50 | #define IXGBE_TDTQ2TCCR_GSP 0x40000000 | ||
51 | #define IXGBE_TDTQ2TCCR_LSP 0x80000000 | ||
52 | |||
53 | #define IXGBE_TDPT2TCCR_MCL_SHIFT 12 | ||
54 | #define IXGBE_TDPT2TCCR_BWG_SHIFT 9 | ||
55 | #define IXGBE_TDPT2TCCR_GSP 0x40000000 | ||
56 | #define IXGBE_TDPT2TCCR_LSP 0x80000000 | ||
57 | |||
58 | #define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */ | ||
59 | #define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ | ||
60 | #define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ | ||
61 | |||
62 | #define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ | ||
63 | |||
64 | #define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ | ||
65 | #define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ | ||
66 | #define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ | ||
67 | #define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ | ||
68 | |||
69 | #define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 | ||
70 | |||
71 | /* DCB hardware-specific driver APIs */ | ||
72 | |||
73 | /* DCB PFC functions */ | ||
74 | s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); | ||
75 | s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *, | ||
76 | u8); | ||
77 | |||
78 | /* DCB traffic class stats */ | ||
79 | s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *); | ||
80 | s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *, | ||
81 | u8); | ||
82 | |||
83 | /* DCB config arbiters */ | ||
84 | s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, | ||
85 | struct ixgbe_dcb_config *); | ||
86 | s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, | ||
87 | struct ixgbe_dcb_config *); | ||
88 | s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, | ||
89 | struct ixgbe_dcb_config *); | ||
90 | |||
91 | /* DCB hw initialization */ | ||
92 | s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); | ||
93 | |||
94 | #endif /* _DCB_82598_CONFIG_H */ | ||
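The RXPBSIZE values above are the inputs to the watermark math in ixgbe_dcb_config_pfc_82598() earlier in this patch. The comment below simply evaluates that code for the pba_equal case; the numbers follow from the shifts and mask in the code, and no claim is made about the register's unit encoding beyond what the code shows.

    /* Worked example of the PFC watermark computation for pba_equal:
     *
     *   rx_pba_size        = IXGBE_RXPBSIZE_64KB        = 0x00010000
     *   FCRTL(i) low mark  = (0x00010000 >> 5) & 0xFFF0 = 0x00000800
     *   FCRTH(i) high mark = (0x00010000 >> 2) & 0xFFF0 = 0x00004000
     *
     * IXGBE_FCRTL_XONE / IXGBE_FCRTH_FCEN are OR'ed in only for classes
     * with dcb_pfc set to pfc_enabled_tx or pfc_enabled_full.
     */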
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c new file mode 100644 index 000000000000..4129976953f5 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c | |||
@@ -0,0 +1,641 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel 10 Gigabit PCI Express Linux driver | ||
4 | Copyright(c) 1999 - 2008 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | |||
29 | #include "ixgbe.h" | ||
30 | #include <linux/dcbnl.h> | ||
31 | |||
32 | /* Callbacks for DCB netlink in the kernel */ | ||
33 | #define BIT_DCB_MODE 0x01 | ||
34 | #define BIT_PFC 0x02 | ||
35 | #define BIT_PG_RX 0x04 | ||
36 | #define BIT_PG_TX 0x08 | ||
37 | #define BIT_BCN 0x10 | ||
38 | |||
39 | int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, | ||
40 | struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max) | ||
41 | { | ||
42 | struct tc_configuration *src_tc_cfg = NULL; | ||
43 | struct tc_configuration *dst_tc_cfg = NULL; | ||
44 | int i; | ||
45 | |||
46 | if (!src_dcb_cfg || !dst_dcb_cfg) | ||
47 | return -EINVAL; | ||
48 | |||
49 | for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { | ||
50 | src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; | ||
51 | dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; | ||
52 | |||
53 | dst_tc_cfg->path[DCB_TX_CONFIG].prio_type = | ||
54 | src_tc_cfg->path[DCB_TX_CONFIG].prio_type; | ||
55 | |||
56 | dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id = | ||
57 | src_tc_cfg->path[DCB_TX_CONFIG].bwg_id; | ||
58 | |||
59 | dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent = | ||
60 | src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent; | ||
61 | |||
62 | dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap = | ||
63 | src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap; | ||
64 | |||
65 | dst_tc_cfg->path[DCB_RX_CONFIG].prio_type = | ||
66 | src_tc_cfg->path[DCB_RX_CONFIG].prio_type; | ||
67 | |||
68 | dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id = | ||
69 | src_tc_cfg->path[DCB_RX_CONFIG].bwg_id; | ||
70 | |||
71 | dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent = | ||
72 | src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent; | ||
73 | |||
74 | dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap = | ||
75 | src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap; | ||
76 | } | ||
77 | |||
78 | for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { | ||
79 | dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG] | ||
80 | [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage | ||
81 | [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; | ||
82 | dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG] | ||
83 | [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage | ||
84 | [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; | ||
85 | } | ||
86 | |||
87 | for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { | ||
88 | dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc = | ||
89 | src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc; | ||
90 | } | ||
91 | |||
92 | for (i = DCB_BCN_ATTR_RP_0; i < DCB_BCN_ATTR_RP_ALL; i++) { | ||
93 | dst_dcb_cfg->bcn.rp_admin_mode[i - DCB_BCN_ATTR_RP_0] = | ||
94 | src_dcb_cfg->bcn.rp_admin_mode[i - DCB_BCN_ATTR_RP_0]; | ||
95 | } | ||
96 | dst_dcb_cfg->bcn.bcna_option[0] = src_dcb_cfg->bcn.bcna_option[0]; | ||
97 | dst_dcb_cfg->bcn.bcna_option[1] = src_dcb_cfg->bcn.bcna_option[1]; | ||
98 | dst_dcb_cfg->bcn.rp_alpha = src_dcb_cfg->bcn.rp_alpha; | ||
99 | dst_dcb_cfg->bcn.rp_beta = src_dcb_cfg->bcn.rp_beta; | ||
100 | dst_dcb_cfg->bcn.rp_gd = src_dcb_cfg->bcn.rp_gd; | ||
101 | dst_dcb_cfg->bcn.rp_gi = src_dcb_cfg->bcn.rp_gi; | ||
102 | dst_dcb_cfg->bcn.rp_tmax = src_dcb_cfg->bcn.rp_tmax; | ||
103 | dst_dcb_cfg->bcn.rp_td = src_dcb_cfg->bcn.rp_td; | ||
104 | dst_dcb_cfg->bcn.rp_rmin = src_dcb_cfg->bcn.rp_rmin; | ||
105 | dst_dcb_cfg->bcn.rp_w = src_dcb_cfg->bcn.rp_w; | ||
106 | dst_dcb_cfg->bcn.rp_rd = src_dcb_cfg->bcn.rp_rd; | ||
107 | dst_dcb_cfg->bcn.rp_ru = src_dcb_cfg->bcn.rp_ru; | ||
108 | dst_dcb_cfg->bcn.rp_wrtt = src_dcb_cfg->bcn.rp_wrtt; | ||
109 | dst_dcb_cfg->bcn.rp_ri = src_dcb_cfg->bcn.rp_ri; | ||
110 | |||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) | ||
115 | { | ||
116 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
117 | |||
118 | DPRINTK(DRV, INFO, "Get DCB Admin Mode.\n"); | ||
119 | |||
120 | return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); | ||
121 | } | ||
122 | |||
123 | static u16 ixgbe_dcb_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
124 | { | ||
125 | /* All traffic should default to class 0 */ | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) | ||
130 | { | ||
131 | u8 err = 0; | ||
132 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
133 | |||
134 | DPRINTK(DRV, INFO, "Set DCB Admin Mode.\n"); | ||
135 | |||
136 | if (state > 0) { | ||
137 | /* Turn on DCB */ | ||
138 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | ||
139 | goto out; | ||
140 | |||
141 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { | ||
142 | DPRINTK(DRV, ERR, "Enable failed, needs MSI-X\n"); | ||
143 | err = 1; | ||
144 | goto out; | ||
145 | } | ||
146 | |||
147 | if (netif_running(netdev)) | ||
148 | netdev->netdev_ops->ndo_stop(netdev); | ||
149 | ixgbe_reset_interrupt_capability(adapter); | ||
150 | ixgbe_napi_del_all(adapter); | ||
151 | INIT_LIST_HEAD(&netdev->napi_list); | ||
152 | kfree(adapter->tx_ring); | ||
153 | kfree(adapter->rx_ring); | ||
154 | adapter->tx_ring = NULL; | ||
155 | adapter->rx_ring = NULL; | ||
156 | netdev->select_queue = &ixgbe_dcb_select_queue; | ||
157 | |||
158 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | ||
159 | adapter->flags |= IXGBE_FLAG_DCB_ENABLED; | ||
160 | ixgbe_init_interrupt_scheme(adapter); | ||
161 | if (netif_running(netdev)) | ||
162 | netdev->netdev_ops->ndo_open(netdev); | ||
163 | } else { | ||
164 | /* Turn off DCB */ | ||
165 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
166 | if (netif_running(netdev)) | ||
167 | netdev->netdev_ops->ndo_stop(netdev); | ||
168 | ixgbe_reset_interrupt_capability(adapter); | ||
169 | ixgbe_napi_del_all(adapter); | ||
170 | INIT_LIST_HEAD(&netdev->napi_list); | ||
171 | kfree(adapter->tx_ring); | ||
172 | kfree(adapter->rx_ring); | ||
173 | adapter->tx_ring = NULL; | ||
174 | adapter->rx_ring = NULL; | ||
175 | netdev->select_queue = NULL; | ||
176 | |||
177 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | ||
178 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; | ||
179 | ixgbe_init_interrupt_scheme(adapter); | ||
180 | if (netif_running(netdev)) | ||
181 | netdev->netdev_ops->ndo_open(netdev); | ||
182 | } | ||
183 | } | ||
184 | out: | ||
185 | return err; | ||
186 | } | ||
187 | |||
188 | static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, | ||
189 | u8 *perm_addr) | ||
190 | { | ||
191 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
192 | int i; | ||
193 | |||
194 | for (i = 0; i < netdev->addr_len; i++) | ||
195 | perm_addr[i] = adapter->hw.mac.perm_addr[i]; | ||
196 | } | ||
197 | |||
198 | static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, | ||
199 | u8 prio, u8 bwg_id, u8 bw_pct, | ||
200 | u8 up_map) | ||
201 | { | ||
202 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
203 | |||
204 | if (prio != DCB_ATTR_VALUE_UNDEFINED) | ||
205 | adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; | ||
206 | if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) | ||
207 | adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; | ||
208 | if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) | ||
209 | adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = | ||
210 | bw_pct; | ||
211 | if (up_map != DCB_ATTR_VALUE_UNDEFINED) | ||
212 | adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = | ||
213 | up_map; | ||
214 | |||
215 | if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type != | ||
216 | adapter->dcb_cfg.tc_config[tc].path[0].prio_type) || | ||
217 | (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id != | ||
218 | adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) || | ||
219 | (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent != | ||
220 | adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) || | ||
221 | (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != | ||
222 | adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) | ||
223 | adapter->dcb_set_bitmap |= BIT_PG_TX; | ||
224 | } | ||
225 | |||
226 | static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, | ||
227 | u8 bw_pct) | ||
228 | { | ||
229 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
230 | |||
231 | adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; | ||
232 | |||
233 | if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != | ||
234 | adapter->dcb_cfg.bw_percentage[0][bwg_id]) | ||
235 | adapter->dcb_set_bitmap |= BIT_PG_RX; | ||
236 | } | ||
237 | |||
238 | static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, | ||
239 | u8 prio, u8 bwg_id, u8 bw_pct, | ||
240 | u8 up_map) | ||
241 | { | ||
242 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
243 | |||
244 | if (prio != DCB_ATTR_VALUE_UNDEFINED) | ||
245 | adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; | ||
246 | if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) | ||
247 | adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; | ||
248 | if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) | ||
249 | adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = | ||
250 | bw_pct; | ||
251 | if (up_map != DCB_ATTR_VALUE_UNDEFINED) | ||
252 | adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = | ||
253 | up_map; | ||
254 | |||
255 | if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type != | ||
256 | adapter->dcb_cfg.tc_config[tc].path[1].prio_type) || | ||
257 | (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id != | ||
258 | adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) || | ||
259 | (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent != | ||
260 | adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) || | ||
261 | (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap != | ||
262 | adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) | ||
263 | adapter->dcb_set_bitmap |= BIT_PG_RX; | ||
264 | } | ||
265 | |||
266 | static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, | ||
267 | u8 bw_pct) | ||
268 | { | ||
269 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
270 | |||
271 | adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; | ||
272 | |||
273 | if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] != | ||
274 | adapter->dcb_cfg.bw_percentage[1][bwg_id]) | ||
275 | adapter->dcb_set_bitmap |= BIT_PG_RX; | ||
276 | } | ||
277 | |||
278 | static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, | ||
279 | u8 *prio, u8 *bwg_id, u8 *bw_pct, | ||
280 | u8 *up_map) | ||
281 | { | ||
282 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
283 | |||
284 | *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; | ||
285 | *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; | ||
286 | *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; | ||
287 | *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; | ||
288 | } | ||
289 | |||
290 | static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, | ||
291 | u8 *bw_pct) | ||
292 | { | ||
293 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
294 | |||
295 | *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; | ||
296 | } | ||
297 | |||
298 | static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, | ||
299 | u8 *prio, u8 *bwg_id, u8 *bw_pct, | ||
300 | u8 *up_map) | ||
301 | { | ||
302 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
303 | |||
304 | *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; | ||
305 | *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; | ||
306 | *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; | ||
307 | *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; | ||
308 | } | ||
309 | |||
310 | static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, | ||
311 | u8 *bw_pct) | ||
312 | { | ||
313 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
314 | |||
315 | *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; | ||
316 | } | ||
317 | |||
318 | static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, | ||
319 | u8 setting) | ||
320 | { | ||
321 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
322 | |||
323 | adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; | ||
324 | if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != | ||
325 | adapter->dcb_cfg.tc_config[priority].dcb_pfc) | ||
326 | adapter->dcb_set_bitmap |= BIT_PFC; | ||
327 | } | ||
328 | |||
329 | static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, | ||
330 | u8 *setting) | ||
331 | { | ||
332 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
333 | |||
334 | *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; | ||
335 | } | ||
336 | |||
337 | static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) | ||
338 | { | ||
339 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
340 | int ret; | ||
341 | |||
342 | adapter->dcb_set_bitmap &= ~BIT_BCN; /* no set for BCN */ | ||
343 | if (!adapter->dcb_set_bitmap) | ||
344 | return 1; | ||
345 | |||
346 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) | ||
347 | msleep(1); | ||
348 | |||
349 | if (netif_running(netdev)) | ||
350 | ixgbe_down(adapter); | ||
351 | |||
352 | ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, | ||
353 | adapter->ring_feature[RING_F_DCB].indices); | ||
354 | if (ret) { | ||
355 | clear_bit(__IXGBE_RESETTING, &adapter->state); | ||
356 | return ret; | ||
357 | } | ||
358 | |||
359 | if (netif_running(netdev)) | ||
360 | ixgbe_up(adapter); | ||
361 | |||
362 | adapter->dcb_set_bitmap = 0x00; | ||
363 | clear_bit(__IXGBE_RESETTING, &adapter->state); | ||
364 | return ret; | ||
365 | } | ||
366 | |||
367 | static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) | ||
368 | { | ||
369 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
370 | u8 rval = 0; | ||
371 | |||
372 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
373 | switch (capid) { | ||
374 | case DCB_CAP_ATTR_PG: | ||
375 | *cap = true; | ||
376 | break; | ||
377 | case DCB_CAP_ATTR_PFC: | ||
378 | *cap = true; | ||
379 | break; | ||
380 | case DCB_CAP_ATTR_UP2TC: | ||
381 | *cap = false; | ||
382 | break; | ||
383 | case DCB_CAP_ATTR_PG_TCS: | ||
384 | *cap = 0x80; | ||
385 | break; | ||
386 | case DCB_CAP_ATTR_PFC_TCS: | ||
387 | *cap = 0x80; | ||
388 | break; | ||
389 | case DCB_CAP_ATTR_GSP: | ||
390 | *cap = true; | ||
391 | break; | ||
392 | case DCB_CAP_ATTR_BCN: | ||
393 | *cap = false; | ||
394 | break; | ||
395 | default: | ||
396 | rval = -EINVAL; | ||
397 | break; | ||
398 | } | ||
399 | } else { | ||
400 | rval = -EINVAL; | ||
401 | } | ||
402 | |||
403 | return rval; | ||
404 | } | ||
405 | |||
406 | static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) | ||
407 | { | ||
408 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
409 | u8 rval = 0; | ||
410 | |||
411 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
412 | switch (tcid) { | ||
413 | case DCB_NUMTCS_ATTR_PG: | ||
414 | *num = MAX_TRAFFIC_CLASS; | ||
415 | break; | ||
416 | case DCB_NUMTCS_ATTR_PFC: | ||
417 | *num = MAX_TRAFFIC_CLASS; | ||
418 | break; | ||
419 | default: | ||
420 | rval = -EINVAL; | ||
421 | break; | ||
422 | } | ||
423 | } else { | ||
424 | rval = -EINVAL; | ||
425 | } | ||
426 | |||
427 | return rval; | ||
428 | } | ||
429 | |||
430 | static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) | ||
431 | { | ||
432 | return -EINVAL; | ||
433 | } | ||
434 | |||
435 | static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) | ||
436 | { | ||
437 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
438 | |||
439 | return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); | ||
440 | } | ||
441 | |||
442 | static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) | ||
443 | { | ||
444 | return; | ||
445 | } | ||
446 | |||
447 | static void ixgbe_dcbnl_getbcnrp(struct net_device *netdev, int priority, | ||
448 | u8 *setting) | ||
449 | { | ||
450 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
451 | |||
452 | *setting = adapter->dcb_cfg.bcn.rp_admin_mode[priority]; | ||
453 | } | ||
454 | |||
455 | |||
456 | static void ixgbe_dcbnl_getbcncfg(struct net_device *netdev, int enum_index, | ||
457 | u32 *setting) | ||
458 | { | ||
459 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
460 | |||
461 | switch (enum_index) { | ||
462 | case DCB_BCN_ATTR_BCNA_0: | ||
463 | *setting = adapter->dcb_cfg.bcn.bcna_option[0]; | ||
464 | break; | ||
465 | case DCB_BCN_ATTR_BCNA_1: | ||
466 | *setting = adapter->dcb_cfg.bcn.bcna_option[1]; | ||
467 | break; | ||
468 | case DCB_BCN_ATTR_ALPHA: | ||
469 | *setting = adapter->dcb_cfg.bcn.rp_alpha; | ||
470 | break; | ||
471 | case DCB_BCN_ATTR_BETA: | ||
472 | *setting = adapter->dcb_cfg.bcn.rp_beta; | ||
473 | break; | ||
474 | case DCB_BCN_ATTR_GD: | ||
475 | *setting = adapter->dcb_cfg.bcn.rp_gd; | ||
476 | break; | ||
477 | case DCB_BCN_ATTR_GI: | ||
478 | *setting = adapter->dcb_cfg.bcn.rp_gi; | ||
479 | break; | ||
480 | case DCB_BCN_ATTR_TMAX: | ||
481 | *setting = adapter->dcb_cfg.bcn.rp_tmax; | ||
482 | break; | ||
483 | case DCB_BCN_ATTR_TD: | ||
484 | *setting = adapter->dcb_cfg.bcn.rp_td; | ||
485 | break; | ||
486 | case DCB_BCN_ATTR_RMIN: | ||
487 | *setting = adapter->dcb_cfg.bcn.rp_rmin; | ||
488 | break; | ||
489 | case DCB_BCN_ATTR_W: | ||
490 | *setting = adapter->dcb_cfg.bcn.rp_w; | ||
491 | break; | ||
492 | case DCB_BCN_ATTR_RD: | ||
493 | *setting = adapter->dcb_cfg.bcn.rp_rd; | ||
494 | break; | ||
495 | case DCB_BCN_ATTR_RU: | ||
496 | *setting = adapter->dcb_cfg.bcn.rp_ru; | ||
497 | break; | ||
498 | case DCB_BCN_ATTR_WRTT: | ||
499 | *setting = adapter->dcb_cfg.bcn.rp_wrtt; | ||
500 | break; | ||
501 | case DCB_BCN_ATTR_RI: | ||
502 | *setting = adapter->dcb_cfg.bcn.rp_ri; | ||
503 | break; | ||
504 | default: | ||
505 | *setting = -1; | ||
506 | } | ||
507 | } | ||
508 | |||
509 | static void ixgbe_dcbnl_setbcnrp(struct net_device *netdev, int priority, | ||
510 | u8 setting) | ||
511 | { | ||
512 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
513 | |||
514 | adapter->temp_dcb_cfg.bcn.rp_admin_mode[priority] = setting; | ||
515 | |||
516 | if (adapter->temp_dcb_cfg.bcn.rp_admin_mode[priority] != | ||
517 | adapter->dcb_cfg.bcn.rp_admin_mode[priority]) | ||
518 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
519 | } | ||
520 | |||
521 | static void ixgbe_dcbnl_setbcncfg(struct net_device *netdev, int enum_index, | ||
522 | u32 setting) | ||
523 | { | ||
524 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
525 | |||
526 | switch (enum_index) { | ||
527 | case DCB_BCN_ATTR_BCNA_0: | ||
528 | adapter->temp_dcb_cfg.bcn.bcna_option[0] = setting; | ||
529 | if (adapter->temp_dcb_cfg.bcn.bcna_option[0] != | ||
530 | adapter->dcb_cfg.bcn.bcna_option[0]) | ||
531 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
532 | break; | ||
533 | case DCB_BCN_ATTR_BCNA_1: | ||
534 | adapter->temp_dcb_cfg.bcn.bcna_option[1] = setting; | ||
535 | if (adapter->temp_dcb_cfg.bcn.bcna_option[1] != | ||
536 | adapter->dcb_cfg.bcn.bcna_option[1]) | ||
537 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
538 | break; | ||
539 | case DCB_BCN_ATTR_ALPHA: | ||
540 | adapter->temp_dcb_cfg.bcn.rp_alpha = setting; | ||
541 | if (adapter->temp_dcb_cfg.bcn.rp_alpha != | ||
542 | adapter->dcb_cfg.bcn.rp_alpha) | ||
543 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
544 | break; | ||
545 | case DCB_BCN_ATTR_BETA: | ||
546 | adapter->temp_dcb_cfg.bcn.rp_beta = setting; | ||
547 | if (adapter->temp_dcb_cfg.bcn.rp_beta != | ||
548 | adapter->dcb_cfg.bcn.rp_beta) | ||
549 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
550 | break; | ||
551 | case DCB_BCN_ATTR_GD: | ||
552 | adapter->temp_dcb_cfg.bcn.rp_gd = setting; | ||
553 | if (adapter->temp_dcb_cfg.bcn.rp_gd != | ||
554 | adapter->dcb_cfg.bcn.rp_gd) | ||
555 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
556 | break; | ||
557 | case DCB_BCN_ATTR_GI: | ||
558 | adapter->temp_dcb_cfg.bcn.rp_gi = setting; | ||
559 | if (adapter->temp_dcb_cfg.bcn.rp_gi != | ||
560 | adapter->dcb_cfg.bcn.rp_gi) | ||
561 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
562 | break; | ||
563 | case DCB_BCN_ATTR_TMAX: | ||
564 | adapter->temp_dcb_cfg.bcn.rp_tmax = setting; | ||
565 | if (adapter->temp_dcb_cfg.bcn.rp_tmax != | ||
566 | adapter->dcb_cfg.bcn.rp_tmax) | ||
567 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
568 | break; | ||
569 | case DCB_BCN_ATTR_TD: | ||
570 | adapter->temp_dcb_cfg.bcn.rp_td = setting; | ||
571 | if (adapter->temp_dcb_cfg.bcn.rp_td != | ||
572 | adapter->dcb_cfg.bcn.rp_td) | ||
573 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
574 | break; | ||
575 | case DCB_BCN_ATTR_RMIN: | ||
576 | adapter->temp_dcb_cfg.bcn.rp_rmin = setting; | ||
577 | if (adapter->temp_dcb_cfg.bcn.rp_rmin != | ||
578 | adapter->dcb_cfg.bcn.rp_rmin) | ||
579 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
580 | break; | ||
581 | case DCB_BCN_ATTR_W: | ||
582 | adapter->temp_dcb_cfg.bcn.rp_w = setting; | ||
583 | if (adapter->temp_dcb_cfg.bcn.rp_w != | ||
584 | adapter->dcb_cfg.bcn.rp_w) | ||
585 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
586 | break; | ||
587 | case DCB_BCN_ATTR_RD: | ||
588 | adapter->temp_dcb_cfg.bcn.rp_rd = setting; | ||
589 | if (adapter->temp_dcb_cfg.bcn.rp_rd != | ||
590 | adapter->dcb_cfg.bcn.rp_rd) | ||
591 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
592 | break; | ||
593 | case DCB_BCN_ATTR_RU: | ||
594 | adapter->temp_dcb_cfg.bcn.rp_ru = setting; | ||
595 | if (adapter->temp_dcb_cfg.bcn.rp_ru != | ||
596 | adapter->dcb_cfg.bcn.rp_ru) | ||
597 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
598 | break; | ||
599 | case DCB_BCN_ATTR_WRTT: | ||
600 | adapter->temp_dcb_cfg.bcn.rp_wrtt = setting; | ||
601 | if (adapter->temp_dcb_cfg.bcn.rp_wrtt != | ||
602 | adapter->dcb_cfg.bcn.rp_wrtt) | ||
603 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
604 | break; | ||
605 | case DCB_BCN_ATTR_RI: | ||
606 | adapter->temp_dcb_cfg.bcn.rp_ri = setting; | ||
607 | if (adapter->temp_dcb_cfg.bcn.rp_ri != | ||
608 | adapter->dcb_cfg.bcn.rp_ri) | ||
609 | adapter->dcb_set_bitmap |= BIT_BCN; | ||
610 | break; | ||
611 | default: | ||
612 | break; | ||
613 | } | ||
614 | } | ||
615 | |||
616 | struct dcbnl_rtnl_ops dcbnl_ops = { | ||
617 | .getstate = ixgbe_dcbnl_get_state, | ||
618 | .setstate = ixgbe_dcbnl_set_state, | ||
619 | .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, | ||
620 | .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx, | ||
621 | .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx, | ||
622 | .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx, | ||
623 | .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx, | ||
624 | .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx, | ||
625 | .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx, | ||
626 | .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx, | ||
627 | .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx, | ||
628 | .setpfccfg = ixgbe_dcbnl_set_pfc_cfg, | ||
629 | .getpfccfg = ixgbe_dcbnl_get_pfc_cfg, | ||
630 | .setall = ixgbe_dcbnl_set_all, | ||
631 | .getcap = ixgbe_dcbnl_getcap, | ||
632 | .getnumtcs = ixgbe_dcbnl_getnumtcs, | ||
633 | .setnumtcs = ixgbe_dcbnl_setnumtcs, | ||
634 | .getpfcstate = ixgbe_dcbnl_getpfcstate, | ||
635 | .setpfcstate = ixgbe_dcbnl_setpfcstate, | ||
636 | .getbcncfg = ixgbe_dcbnl_getbcncfg, | ||
637 | .getbcnrp = ixgbe_dcbnl_getbcnrp, | ||
638 | .setbcncfg = ixgbe_dcbnl_setbcncfg, | ||
639 | .setbcnrp = ixgbe_dcbnl_setbcnrp | ||
640 | }; | ||
641 | |||
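The dcbnl_rtnl_ops table above only becomes reachable once it is attached to the net_device; that hookup is done in ixgbe_main.c and falls outside this section, so the fragment below is an illustrative sketch of the assumed registration point, not a quote from the patch.

    /* Sketch: assumed attachment of the DCB netlink ops during probe,
     * guarded by CONFIG_IXGBE_DCB (the exact location in ixgbe_probe()
     * is an assumption, not shown in this hunk). */
    #ifdef CONFIG_IXGBE_DCB
            netdev->dcbnl_ops = &dcbnl_ops;
    #endif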
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 81a9c4b86726..67f87a79154d 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -94,12 +94,21 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { | |||
94 | }; | 94 | }; |
95 | 95 | ||
96 | #define IXGBE_QUEUE_STATS_LEN \ | 96 | #define IXGBE_QUEUE_STATS_LEN \ |
97 | ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \ | 97 | ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \ |
98 | ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \ | 98 | ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \ |
99 | (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) | 99 | (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) |
100 | #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) | ||
101 | #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) | 100 | #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) |
102 | #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) | 101 | #define IXGBE_PB_STATS_LEN ( \ |
102 | (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \ | ||
103 | IXGBE_FLAG_DCB_ENABLED) ? \ | ||
104 | (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ | ||
105 | sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ | ||
106 | sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ | ||
107 | sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ | ||
108 | / sizeof(u64) : 0) | ||
109 | #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ | ||
110 | IXGBE_PB_STATS_LEN + \ | ||
111 | IXGBE_QUEUE_STATS_LEN) | ||
103 | 112 | ||
104 | static int ixgbe_get_settings(struct net_device *netdev, | 113 | static int ixgbe_get_settings(struct net_device *netdev, |
105 | struct ethtool_cmd *ecmd) | 114 | struct ethtool_cmd *ecmd) |
@@ -149,6 +158,8 @@ static int ixgbe_set_settings(struct net_device *netdev, | |||
149 | { | 158 | { |
150 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 159 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
151 | struct ixgbe_hw *hw = &adapter->hw; | 160 | struct ixgbe_hw *hw = &adapter->hw; |
161 | u32 advertised, old; | ||
162 | s32 err; | ||
152 | 163 | ||
153 | switch (hw->phy.media_type) { | 164 | switch (hw->phy.media_type) { |
154 | case ixgbe_media_type_fiber: | 165 | case ixgbe_media_type_fiber: |
@@ -157,6 +168,31 @@ static int ixgbe_set_settings(struct net_device *netdev, | |||
157 | return -EINVAL; | 168 | return -EINVAL; |
158 | /* in this case we currently only support 10Gb/FULL */ | 169 | /* in this case we currently only support 10Gb/FULL */ |
159 | break; | 170 | break; |
171 | case ixgbe_media_type_copper: | ||
172 | /* 10000/copper and 1000/copper must autoneg | ||
173 | * this function does not support any duplex forcing, but can | ||
174 | * limit the advertising of the adapter to only 10000 or 1000 */ | ||
175 | if (ecmd->autoneg == AUTONEG_DISABLE) | ||
176 | return -EINVAL; | ||
177 | |||
178 | old = hw->phy.autoneg_advertised; | ||
179 | advertised = 0; | ||
180 | if (ecmd->advertising & ADVERTISED_10000baseT_Full) | ||
181 | advertised |= IXGBE_LINK_SPEED_10GB_FULL; | ||
182 | |||
183 | if (ecmd->advertising & ADVERTISED_1000baseT_Full) | ||
184 | advertised |= IXGBE_LINK_SPEED_1GB_FULL; | ||
185 | |||
186 | if (old == advertised) | ||
187 | break; | ||
188 | /* this sets the link speed and restarts auto-neg */ | ||
189 | err = hw->mac.ops.setup_link_speed(hw, advertised, true, true); | ||
190 | if (err) { | ||
191 | DPRINTK(PROBE, INFO, | ||
192 | "setup link failed with code %d\n", err); | ||
193 | hw->mac.ops.setup_link_speed(hw, old, true, true); | ||
194 | } | ||
195 | break; | ||
160 | default: | 196 | default: |
161 | break; | 197 | break; |
162 | } | 198 | } |
@@ -676,30 +712,15 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
676 | return 0; | 712 | return 0; |
677 | } | 713 | } |
678 | 714 | ||
679 | if (adapter->num_tx_queues > adapter->num_rx_queues) | 715 | temp_ring = kcalloc(adapter->num_tx_queues, |
680 | temp_ring = vmalloc(adapter->num_tx_queues * | 716 | sizeof(struct ixgbe_ring), GFP_KERNEL); |
681 | sizeof(struct ixgbe_ring)); | ||
682 | else | ||
683 | temp_ring = vmalloc(adapter->num_rx_queues * | ||
684 | sizeof(struct ixgbe_ring)); | ||
685 | if (!temp_ring) | 717 | if (!temp_ring) |
686 | return -ENOMEM; | 718 | return -ENOMEM; |
687 | 719 | ||
688 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) | 720 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) |
689 | msleep(1); | 721 | msleep(1); |
690 | 722 | ||
691 | if (netif_running(netdev)) | ||
692 | ixgbe_down(adapter); | ||
693 | |||
694 | /* | ||
695 | * We can't just free everything and then setup again, | ||
696 | * because the ISRs in MSI-X mode get passed pointers | ||
697 | * to the tx and rx ring structs. | ||
698 | */ | ||
699 | if (new_tx_count != adapter->tx_ring->count) { | 723 | if (new_tx_count != adapter->tx_ring->count) { |
700 | memcpy(temp_ring, adapter->tx_ring, | ||
701 | adapter->num_tx_queues * sizeof(struct ixgbe_ring)); | ||
702 | |||
703 | for (i = 0; i < adapter->num_tx_queues; i++) { | 724 | for (i = 0; i < adapter->num_tx_queues; i++) { |
704 | temp_ring[i].count = new_tx_count; | 725 | temp_ring[i].count = new_tx_count; |
705 | err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]); | 726 | err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]); |
@@ -711,21 +732,28 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
711 | } | 732 | } |
712 | goto err_setup; | 733 | goto err_setup; |
713 | } | 734 | } |
735 | temp_ring[i].v_idx = adapter->tx_ring[i].v_idx; | ||
714 | } | 736 | } |
715 | 737 | if (netif_running(netdev)) | |
716 | for (i = 0; i < adapter->num_tx_queues; i++) | 738 | netdev->netdev_ops->ndo_stop(netdev); |
717 | ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]); | 739 | ixgbe_reset_interrupt_capability(adapter); |
718 | 740 | ixgbe_napi_del_all(adapter); | |
719 | memcpy(adapter->tx_ring, temp_ring, | 741 | INIT_LIST_HEAD(&netdev->napi_list); |
720 | adapter->num_tx_queues * sizeof(struct ixgbe_ring)); | 742 | kfree(adapter->tx_ring); |
721 | 743 | adapter->tx_ring = temp_ring; | |
744 | temp_ring = NULL; | ||
722 | adapter->tx_ring_count = new_tx_count; | 745 | adapter->tx_ring_count = new_tx_count; |
723 | } | 746 | } |
724 | 747 | ||
725 | if (new_rx_count != adapter->rx_ring->count) { | 748 | temp_ring = kcalloc(adapter->num_rx_queues, |
726 | memcpy(temp_ring, adapter->rx_ring, | 749 | sizeof(struct ixgbe_ring), GFP_KERNEL); |
727 | adapter->num_rx_queues * sizeof(struct ixgbe_ring)); | 750 | if (!temp_ring) { |
751 | if (netif_running(netdev)) | ||
752 | netdev->netdev_ops->ndo_open(netdev); | ||
753 | return -ENOMEM; | ||
754 | } | ||
728 | 755 | ||
756 | if (new_rx_count != adapter->rx_ring->count) { | ||
729 | for (i = 0; i < adapter->num_rx_queues; i++) { | 757 | for (i = 0; i < adapter->num_rx_queues; i++) { |
730 | temp_ring[i].count = new_rx_count; | 758 | temp_ring[i].count = new_rx_count; |
731 | err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); | 759 | err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); |
@@ -737,13 +765,16 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
737 | } | 765 | } |
738 | goto err_setup; | 766 | goto err_setup; |
739 | } | 767 | } |
768 | temp_ring[i].v_idx = adapter->rx_ring[i].v_idx; | ||
740 | } | 769 | } |
741 | 770 | if (netif_running(netdev)) | |
742 | for (i = 0; i < adapter->num_rx_queues; i++) | 771 | netdev->netdev_ops->ndo_stop(netdev); |
743 | ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]); | 772 | ixgbe_reset_interrupt_capability(adapter); |
744 | 773 | ixgbe_napi_del_all(adapter); | |
745 | memcpy(adapter->rx_ring, temp_ring, | 774 | INIT_LIST_HEAD(&netdev->napi_list); |
746 | adapter->num_rx_queues * sizeof(struct ixgbe_ring)); | 775 | kfree(adapter->rx_ring); |
776 | adapter->rx_ring = temp_ring; | ||
777 | temp_ring = NULL; | ||
747 | 778 | ||
748 | adapter->rx_ring_count = new_rx_count; | 779 | adapter->rx_ring_count = new_rx_count; |
749 | } | 780 | } |
@@ -751,8 +782,9 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
751 | /* success! */ | 782 | /* success! */ |
752 | err = 0; | 783 | err = 0; |
753 | err_setup: | 784 | err_setup: |
785 | ixgbe_init_interrupt_scheme(adapter); | ||
754 | if (netif_running(netdev)) | 786 | if (netif_running(netdev)) |
755 | ixgbe_up(adapter); | 787 | netdev->netdev_ops->ndo_open(netdev); |
756 | 788 | ||
757 | clear_bit(__IXGBE_RESETTING, &adapter->state); | 789 | clear_bit(__IXGBE_RESETTING, &adapter->state); |
758 | return err; | 790 | return err; |
@@ -804,6 +836,16 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, | |||
804 | data[i + k] = queue_stat[k]; | 836 | data[i + k] = queue_stat[k]; |
805 | i += k; | 837 | i += k; |
806 | } | 838 | } |
839 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
840 | for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) { | ||
841 | data[i++] = adapter->stats.pxontxc[j]; | ||
842 | data[i++] = adapter->stats.pxofftxc[j]; | ||
843 | } | ||
844 | for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) { | ||
845 | data[i++] = adapter->stats.pxonrxc[j]; | ||
846 | data[i++] = adapter->stats.pxoffrxc[j]; | ||
847 | } | ||
848 | } | ||
807 | } | 849 | } |
808 | 850 | ||
809 | static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, | 851 | static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, |
@@ -832,6 +874,20 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, | |||
832 | sprintf(p, "rx_queue_%u_bytes", i); | 874 | sprintf(p, "rx_queue_%u_bytes", i); |
833 | p += ETH_GSTRING_LEN; | 875 | p += ETH_GSTRING_LEN; |
834 | } | 876 | } |
877 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
878 | for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { | ||
879 | sprintf(p, "tx_pb_%u_pxon", i); | ||
880 | p += ETH_GSTRING_LEN; | ||
881 | sprintf(p, "tx_pb_%u_pxoff", i); | ||
882 | p += ETH_GSTRING_LEN; | ||
883 | } | ||
884 | for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) { | ||
885 | sprintf(p, "rx_pb_%u_pxon", i); | ||
886 | p += ETH_GSTRING_LEN; | ||
887 | sprintf(p, "rx_pb_%u_pxoff", i); | ||
888 | p += ETH_GSTRING_LEN; | ||
889 | } | ||
890 | } | ||
835 | /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ | 891 | /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ |
836 | break; | 892 | break; |
837 | } | 893 | } |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 5236f633ee36..acef3c65cd2c 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -68,12 +68,20 @@ static struct pci_device_id ixgbe_pci_tbl[] = { | |||
68 | board_82598 }, | 68 | board_82598 }, |
69 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), | 69 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), |
70 | board_82598 }, | 70 | board_82598 }, |
71 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), | ||
72 | board_82598 }, | ||
71 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), | 73 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), |
72 | board_82598 }, | 74 | board_82598 }, |
73 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), | 75 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), |
74 | board_82598 }, | 76 | board_82598 }, |
77 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), | ||
78 | board_82598 }, | ||
79 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), | ||
80 | board_82598 }, | ||
75 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), | 81 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), |
76 | board_82598 }, | 82 | board_82598 }, |
83 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), | ||
84 | board_82598 }, | ||
77 | 85 | ||
78 | /* required last entry */ | 86 | /* required last entry */ |
79 | {0, } | 87 | {0, } |
@@ -402,7 +410,7 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, | |||
402 | 410 | ||
403 | if (adapter->netdev->features & NETIF_F_LRO && | 411 | if (adapter->netdev->features & NETIF_F_LRO && |
404 | skb->ip_summed == CHECKSUM_UNNECESSARY) { | 412 | skb->ip_summed == CHECKSUM_UNNECESSARY) { |
405 | if (adapter->vlgrp && is_vlan) | 413 | if (adapter->vlgrp && is_vlan && (tag != 0)) |
406 | lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, | 414 | lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, |
407 | adapter->vlgrp, tag, | 415 | adapter->vlgrp, tag, |
408 | rx_desc); | 416 | rx_desc); |
@@ -411,12 +419,12 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, | |||
411 | ring->lro_used = true; | 419 | ring->lro_used = true; |
412 | } else { | 420 | } else { |
413 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { | 421 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { |
414 | if (adapter->vlgrp && is_vlan) | 422 | if (adapter->vlgrp && is_vlan && (tag != 0)) |
415 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag); | 423 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag); |
416 | else | 424 | else |
417 | netif_receive_skb(skb); | 425 | netif_receive_skb(skb); |
418 | } else { | 426 | } else { |
419 | if (adapter->vlgrp && is_vlan) | 427 | if (adapter->vlgrp && is_vlan && (tag != 0)) |
420 | vlan_hwaccel_rx(skb, adapter->vlgrp, tag); | 428 | vlan_hwaccel_rx(skb, adapter->vlgrp, tag); |
421 | else | 429 | else |
422 | netif_rx(skb); | 430 | netif_rx(skb); |
@@ -471,7 +479,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
471 | union ixgbe_adv_rx_desc *rx_desc; | 479 | union ixgbe_adv_rx_desc *rx_desc; |
472 | struct ixgbe_rx_buffer *bi; | 480 | struct ixgbe_rx_buffer *bi; |
473 | unsigned int i; | 481 | unsigned int i; |
474 | unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN; | ||
475 | 482 | ||
476 | i = rx_ring->next_to_use; | 483 | i = rx_ring->next_to_use; |
477 | bi = &rx_ring->rx_buffer_info[i]; | 484 | bi = &rx_ring->rx_buffer_info[i]; |
@@ -500,8 +507,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
500 | } | 507 | } |
501 | 508 | ||
502 | if (!bi->skb) { | 509 | if (!bi->skb) { |
503 | struct sk_buff *skb = netdev_alloc_skb(adapter->netdev, | 510 | struct sk_buff *skb; |
504 | bufsz); | 511 | skb = netdev_alloc_skb(adapter->netdev, |
512 | (rx_ring->rx_buf_len + | ||
513 | NET_IP_ALIGN)); | ||
505 | 514 | ||
506 | if (!skb) { | 515 | if (!skb) { |
507 | adapter->alloc_rx_buff_failed++; | 516 | adapter->alloc_rx_buff_failed++; |
@@ -516,7 +525,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
516 | skb_reserve(skb, NET_IP_ALIGN); | 525 | skb_reserve(skb, NET_IP_ALIGN); |
517 | 526 | ||
518 | bi->skb = skb; | 527 | bi->skb = skb; |
519 | bi->dma = pci_map_single(pdev, skb->data, bufsz, | 528 | bi->dma = pci_map_single(pdev, skb->data, |
529 | rx_ring->rx_buf_len, | ||
520 | PCI_DMA_FROMDEVICE); | 530 | PCI_DMA_FROMDEVICE); |
521 | } | 531 | } |
522 | /* Refresh the desc even if buffer_addrs didn't change because | 532 | /* Refresh the desc even if buffer_addrs didn't change because |
@@ -607,7 +617,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, | |||
607 | 617 | ||
608 | if (len && !skb_shinfo(skb)->nr_frags) { | 618 | if (len && !skb_shinfo(skb)->nr_frags) { |
609 | pci_unmap_single(pdev, rx_buffer_info->dma, | 619 | pci_unmap_single(pdev, rx_buffer_info->dma, |
610 | rx_ring->rx_buf_len + NET_IP_ALIGN, | 620 | rx_ring->rx_buf_len, |
611 | PCI_DMA_FROMDEVICE); | 621 | PCI_DMA_FROMDEVICE); |
612 | skb_put(skb, len); | 622 | skb_put(skb, len); |
613 | } | 623 | } |
@@ -666,7 +676,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, | |||
666 | 676 | ||
667 | skb->protocol = eth_type_trans(skb, adapter->netdev); | 677 | skb->protocol = eth_type_trans(skb, adapter->netdev); |
668 | ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc); | 678 | ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc); |
669 | adapter->netdev->last_rx = jiffies; | ||
670 | 679 | ||
671 | next_desc: | 680 | next_desc: |
672 | rx_desc->wb.upper.status_error = 0; | 681 | rx_desc->wb.upper.status_error = 0; |
@@ -904,6 +913,17 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) | |||
904 | return; | 913 | return; |
905 | } | 914 | } |
906 | 915 | ||
916 | static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) | ||
917 | { | ||
918 | struct ixgbe_hw *hw = &adapter->hw; | ||
919 | |||
920 | if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && | ||
921 | (eicr & IXGBE_EICR_GPI_SDP1)) { | ||
922 | DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n"); | ||
923 | /* write to clear the interrupt */ | ||
924 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); | ||
925 | } | ||
926 | } | ||
907 | 927 | ||
908 | static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) | 928 | static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) |
909 | { | 929 | { |
@@ -928,6 +948,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) | |||
928 | if (eicr & IXGBE_EICR_LSC) | 948 | if (eicr & IXGBE_EICR_LSC) |
929 | ixgbe_check_lsc(adapter); | 949 | ixgbe_check_lsc(adapter); |
930 | 950 | ||
951 | ixgbe_check_fan_failure(adapter, eicr); | ||
952 | |||
931 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 953 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
932 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); | 954 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); |
933 | 955 | ||
@@ -990,7 +1012,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) | |||
990 | rx_ring = &(adapter->rx_ring[r_idx]); | 1012 | rx_ring = &(adapter->rx_ring[r_idx]); |
991 | /* disable interrupts on this vector only */ | 1013 | /* disable interrupts on this vector only */ |
992 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); | 1014 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); |
993 | netif_rx_schedule(adapter->netdev, &q_vector->napi); | 1015 | netif_rx_schedule(&q_vector->napi); |
994 | 1016 | ||
995 | return IRQ_HANDLED; | 1017 | return IRQ_HANDLED; |
996 | } | 1018 | } |
@@ -1031,7 +1053,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) | |||
1031 | 1053 | ||
1032 | /* If all Rx work done, exit the polling mode */ | 1054 | /* If all Rx work done, exit the polling mode */ |
1033 | if (work_done < budget) { | 1055 | if (work_done < budget) { |
1034 | netif_rx_complete(adapter->netdev, napi); | 1056 | netif_rx_complete(napi); |
1035 | if (adapter->itr_setting & 3) | 1057 | if (adapter->itr_setting & 3) |
1036 | ixgbe_set_itr_msix(q_vector); | 1058 | ixgbe_set_itr_msix(q_vector); |
1037 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 1059 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
@@ -1080,7 +1102,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) | |||
1080 | rx_ring = &(adapter->rx_ring[r_idx]); | 1102 | rx_ring = &(adapter->rx_ring[r_idx]); |
1081 | /* If all Rx work done, exit the polling mode */ | 1103 | /* If all Rx work done, exit the polling mode */ |
1082 | if (work_done < budget) { | 1104 | if (work_done < budget) { |
1083 | netif_rx_complete(adapter->netdev, napi); | 1105 | netif_rx_complete(napi); |
1084 | if (adapter->itr_setting & 3) | 1106 | if (adapter->itr_setting & 3) |
1085 | ixgbe_set_itr_msix(q_vector); | 1107 | ixgbe_set_itr_msix(q_vector); |
1086 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 1108 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
@@ -1187,6 +1209,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | |||
1187 | struct net_device *netdev = adapter->netdev; | 1209 | struct net_device *netdev = adapter->netdev; |
1188 | irqreturn_t (*handler)(int, void *); | 1210 | irqreturn_t (*handler)(int, void *); |
1189 | int i, vector, q_vectors, err; | 1211 | int i, vector, q_vectors, err; |
1212 | int ri = 0, ti = 0; | ||
1190 | 1213 | ||
1191 | /* Decrement for Other and TCP Timer vectors */ | 1214 | /* Decrement for Other and TCP Timer vectors */ |
1192 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 1215 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
@@ -1201,10 +1224,19 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | |||
1201 | &ixgbe_msix_clean_many) | 1224 | &ixgbe_msix_clean_many) |
1202 | for (vector = 0; vector < q_vectors; vector++) { | 1225 | for (vector = 0; vector < q_vectors; vector++) { |
1203 | handler = SET_HANDLER(&adapter->q_vector[vector]); | 1226 | handler = SET_HANDLER(&adapter->q_vector[vector]); |
1204 | sprintf(adapter->name[vector], "%s:v%d-%s", | 1227 | |
1205 | netdev->name, vector, | 1228 | if (handler == &ixgbe_msix_clean_rx) {
1206 | (handler == &ixgbe_msix_clean_rx) ? "Rx" : | 1229 | sprintf(adapter->name[vector], "%s-%s-%d", |
1207 | ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx")); | 1230 | netdev->name, "rx", ri++); |
1231 | } | ||
1232 | else if (handler == &ixgbe_msix_clean_tx) { | ||
1233 | sprintf(adapter->name[vector], "%s-%s-%d", | ||
1234 | netdev->name, "tx", ti++); | ||
1235 | } | ||
1236 | else | ||
1237 | sprintf(adapter->name[vector], "%s-%s-%d", | ||
1238 | netdev->name, "TxRx", vector); | ||
1239 | |||
1208 | err = request_irq(adapter->msix_entries[vector].vector, | 1240 | err = request_irq(adapter->msix_entries[vector].vector, |
1209 | handler, 0, adapter->name[vector], | 1241 | handler, 0, adapter->name[vector], |
1210 | &(adapter->q_vector[vector])); | 1242 | &(adapter->q_vector[vector])); |
@@ -1312,6 +1344,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) | |||
1312 | { | 1344 | { |
1313 | u32 mask; | 1345 | u32 mask; |
1314 | mask = IXGBE_EIMS_ENABLE_MASK; | 1346 | mask = IXGBE_EIMS_ENABLE_MASK; |
1347 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) | ||
1348 | mask |= IXGBE_EIMS_GPI_SDP1; | ||
1315 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); | 1349 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); |
1316 | IXGBE_WRITE_FLUSH(&adapter->hw); | 1350 | IXGBE_WRITE_FLUSH(&adapter->hw); |
1317 | } | 1351 | } |
@@ -1342,13 +1376,15 @@ static irqreturn_t ixgbe_intr(int irq, void *data) | |||
1342 | if (eicr & IXGBE_EICR_LSC) | 1376 | if (eicr & IXGBE_EICR_LSC) |
1343 | ixgbe_check_lsc(adapter); | 1377 | ixgbe_check_lsc(adapter); |
1344 | 1378 | ||
1345 | if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) { | 1379 | ixgbe_check_fan_failure(adapter, eicr); |
1380 | |||
1381 | if (netif_rx_schedule_prep(&adapter->q_vector[0].napi)) { | ||
1346 | adapter->tx_ring[0].total_packets = 0; | 1382 | adapter->tx_ring[0].total_packets = 0; |
1347 | adapter->tx_ring[0].total_bytes = 0; | 1383 | adapter->tx_ring[0].total_bytes = 0; |
1348 | adapter->rx_ring[0].total_packets = 0; | 1384 | adapter->rx_ring[0].total_packets = 0; |
1349 | adapter->rx_ring[0].total_bytes = 0; | 1385 | adapter->rx_ring[0].total_bytes = 0; |
1350 | /* would disable interrupts here but EIAM disabled it */ | 1386 | /* would disable interrupts here but EIAM disabled it */ |
1351 | __netif_rx_schedule(netdev, &adapter->q_vector[0].napi); | 1387 | __netif_rx_schedule(&adapter->q_vector[0].napi); |
1352 | } | 1388 | } |
1353 | 1389 | ||
1354 | return IRQ_HANDLED; | 1390 | return IRQ_HANDLED; |
@@ -1651,10 +1687,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
1651 | * effects of setting this bit are only that SRRCTL must be | 1687 | * effects of setting this bit are only that SRRCTL must be |
1652 | * fully programmed [0..15] | 1688 | * fully programmed [0..15] |
1653 | */ | 1689 | */ |
1654 | rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); | 1690 | if (adapter->flags & |
1655 | rdrxctl |= IXGBE_RDRXCTL_MVMEN; | 1691 | (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) { |
1656 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); | 1692 | rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); |
1657 | 1693 | rdrxctl |= IXGBE_RDRXCTL_MVMEN; | |
1694 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); | ||
1695 | } | ||
1658 | 1696 | ||
1659 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | 1697 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { |
1660 | /* Fill out redirection table */ | 1698 | /* Fill out redirection table */ |
@@ -1713,6 +1751,16 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev, | |||
1713 | ixgbe_irq_disable(adapter); | 1751 | ixgbe_irq_disable(adapter); |
1714 | adapter->vlgrp = grp; | 1752 | adapter->vlgrp = grp; |
1715 | 1753 | ||
1754 | /* | ||
1755 | * For a DCB driver, always enable VLAN tag stripping so we can | ||
1756 | * still receive traffic from a DCB-enabled host even if we're | ||
1757 | * not in DCB mode. | ||
1758 | */ | ||
1759 | ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); | ||
1760 | ctrl |= IXGBE_VLNCTRL_VME; | ||
1761 | ctrl &= ~IXGBE_VLNCTRL_CFIEN; | ||
1762 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); | ||
1763 | |||
1716 | if (grp) { | 1764 | if (grp) { |
1717 | /* enable VLAN tag insert/strip */ | 1765 | /* enable VLAN tag insert/strip */ |
1718 | ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); | 1766 | ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); |
@@ -1877,6 +1925,44 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) | |||
1877 | } | 1925 | } |
1878 | } | 1926 | } |
1879 | 1927 | ||
1928 | #ifdef CONFIG_IXGBE_DCB | ||
1929 | /* | ||
1930 | * ixgbe_configure_dcb - Configure DCB hardware | ||
1931 | * @adapter: ixgbe adapter struct | ||
1932 | * | ||
1933 | * This is called by the driver on open to configure the DCB hardware. | ||
1934 | * This is also called by the gennetlink interface when reconfiguring | ||
1935 | * the DCB state. | ||
1936 | */ | ||
1937 | static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) | ||
1938 | { | ||
1939 | struct ixgbe_hw *hw = &adapter->hw; | ||
1940 | u32 txdctl, vlnctrl; | ||
1941 | int i, j; | ||
1942 | |||
1943 | ixgbe_dcb_check_config(&adapter->dcb_cfg); | ||
1944 | ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG); | ||
1945 | ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG); | ||
1946 | |||
1947 | /* reconfigure the hardware */ | ||
1948 | ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg); | ||
1949 | |||
1950 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
1951 | j = adapter->tx_ring[i].reg_idx; | ||
1952 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | ||
1953 | /* PThresh workaround for Tx hang with DFP enabled. */ | ||
1954 | txdctl |= 32; | ||
1955 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); | ||
1956 | } | ||
1957 | /* Enable VLAN tag insert/strip */ | ||
1958 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||
1959 | vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; | ||
1960 | vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; | ||
1961 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
1962 | hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); | ||
1963 | } | ||
1964 | |||
1965 | #endif | ||
1880 | static void ixgbe_configure(struct ixgbe_adapter *adapter) | 1966 | static void ixgbe_configure(struct ixgbe_adapter *adapter) |
1881 | { | 1967 | { |
1882 | struct net_device *netdev = adapter->netdev; | 1968 | struct net_device *netdev = adapter->netdev; |
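The PThresh workaround in ixgbe_configure_dcb() above simply ORs 32 into each TXDCTL register. Assuming the usual 82598 TXDCTL layout (prefetch threshold in bits 6:0), that guarantees a prefetch threshold of at least 32 descriptors without disturbing the host or write-back threshold fields. A minimal sketch of the effect, with a hypothetical starting value:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed 82598 TXDCTL layout: PTHRESH bits 6:0, HTHRESH bits 14:8,
     * WTHRESH bits 22:16. ORing in 32 sets bit 5 of PTHRESH, so the
     * prefetch threshold can never drop below 32 descriptors. */
    int main(void)
    {
        uint32_t txdctl = 0x0000001F;   /* hypothetical value read from hardware */

        txdctl |= 32;                   /* the DFP Tx-hang workaround above */
        printf("PTHRESH = %u\n", (unsigned)(txdctl & 0x7F));   /* 63 */
        return 0;
    }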
@@ -1885,6 +1971,16 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) | |||
1885 | ixgbe_set_rx_mode(netdev); | 1971 | ixgbe_set_rx_mode(netdev); |
1886 | 1972 | ||
1887 | ixgbe_restore_vlan(adapter); | 1973 | ixgbe_restore_vlan(adapter); |
1974 | #ifdef CONFIG_IXGBE_DCB | ||
1975 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
1976 | netif_set_gso_max_size(netdev, 32768); | ||
1977 | ixgbe_configure_dcb(adapter); | ||
1978 | } else { | ||
1979 | netif_set_gso_max_size(netdev, 65536); | ||
1980 | } | ||
1981 | #else | ||
1982 | netif_set_gso_max_size(netdev, 65536); | ||
1983 | #endif | ||
1888 | 1984 | ||
1889 | ixgbe_configure_tx(adapter); | 1985 | ixgbe_configure_tx(adapter); |
1890 | ixgbe_configure_rx(adapter); | 1986 | ixgbe_configure_rx(adapter); |
@@ -1924,6 +2020,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
1924 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); | 2020 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); |
1925 | } | 2021 | } |
1926 | 2022 | ||
2023 | /* Enable fan failure interrupt if media type is copper */ | ||
2024 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { | ||
2025 | gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); | ||
2026 | gpie |= IXGBE_SDP1_GPIEN; | ||
2027 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); | ||
2028 | } | ||
2029 | |||
1927 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); | 2030 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); |
1928 | if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { | 2031 | if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { |
1929 | mhadd &= ~IXGBE_MHADD_MFS_MASK; | 2032 | mhadd &= ~IXGBE_MHADD_MFS_MASK; |
@@ -1961,6 +2064,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
1961 | else | 2064 | else |
1962 | ixgbe_configure_msi_and_legacy(adapter); | 2065 | ixgbe_configure_msi_and_legacy(adapter); |
1963 | 2066 | ||
2067 | ixgbe_napi_add_all(adapter); | ||
2068 | |||
1964 | clear_bit(__IXGBE_DOWN, &adapter->state); | 2069 | clear_bit(__IXGBE_DOWN, &adapter->state); |
1965 | ixgbe_napi_enable_all(adapter); | 2070 | ixgbe_napi_enable_all(adapter); |
1966 | 2071 | ||
@@ -2205,7 +2310,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) | |||
2205 | 2310 | ||
2206 | /* If budget not fully consumed, exit the polling mode */ | 2311 | /* If budget not fully consumed, exit the polling mode */ |
2207 | if (work_done < budget) { | 2312 | if (work_done < budget) { |
2208 | netif_rx_complete(adapter->netdev, napi); | 2313 | netif_rx_complete(napi); |
2209 | if (adapter->itr_setting & 3) | 2314 | if (adapter->itr_setting & 3) |
2210 | ixgbe_set_itr(adapter); | 2315 | ixgbe_set_itr(adapter); |
2211 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 2316 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
@@ -2231,6 +2336,11 @@ static void ixgbe_reset_task(struct work_struct *work) | |||
2231 | struct ixgbe_adapter *adapter; | 2336 | struct ixgbe_adapter *adapter; |
2232 | adapter = container_of(work, struct ixgbe_adapter, reset_task); | 2337 | adapter = container_of(work, struct ixgbe_adapter, reset_task); |
2233 | 2338 | ||
2339 | /* If we're already down or resetting, just bail */ | ||
2340 | if (test_bit(__IXGBE_DOWN, &adapter->state) || | ||
2341 | test_bit(__IXGBE_RESETTING, &adapter->state)) | ||
2342 | return; | ||
2343 | |||
2234 | adapter->tx_timeout_count++; | 2344 | adapter->tx_timeout_count++; |
2235 | 2345 | ||
2236 | ixgbe_reinit_locked(adapter); | 2346 | ixgbe_reinit_locked(adapter); |
@@ -2240,15 +2350,31 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | |||
2240 | { | 2350 | { |
2241 | int nrq = 1, ntq = 1; | 2351 | int nrq = 1, ntq = 1; |
2242 | int feature_mask = 0, rss_i, rss_m; | 2352 | int feature_mask = 0, rss_i, rss_m; |
2353 | int dcb_i, dcb_m; | ||
2243 | 2354 | ||
2244 | /* Number of supported queues */ | 2355 | /* Number of supported queues */ |
2245 | switch (adapter->hw.mac.type) { | 2356 | switch (adapter->hw.mac.type) { |
2246 | case ixgbe_mac_82598EB: | 2357 | case ixgbe_mac_82598EB: |
2358 | dcb_i = adapter->ring_feature[RING_F_DCB].indices; | ||
2359 | dcb_m = 0; | ||
2247 | rss_i = adapter->ring_feature[RING_F_RSS].indices; | 2360 | rss_i = adapter->ring_feature[RING_F_RSS].indices; |
2248 | rss_m = 0; | 2361 | rss_m = 0; |
2249 | feature_mask |= IXGBE_FLAG_RSS_ENABLED; | 2362 | feature_mask |= IXGBE_FLAG_RSS_ENABLED; |
2363 | feature_mask |= IXGBE_FLAG_DCB_ENABLED; | ||
2250 | 2364 | ||
2251 | switch (adapter->flags & feature_mask) { | 2365 | switch (adapter->flags & feature_mask) { |
2366 | case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED): | ||
2367 | dcb_m = 0x7 << 3; | ||
2368 | rss_i = min(8, rss_i); | ||
2369 | rss_m = 0x7; | ||
2370 | nrq = dcb_i * rss_i; | ||
2371 | ntq = min(MAX_TX_QUEUES, dcb_i * rss_i); | ||
2372 | break; | ||
2373 | case (IXGBE_FLAG_DCB_ENABLED): | ||
2374 | dcb_m = 0x7 << 3; | ||
2375 | nrq = dcb_i; | ||
2376 | ntq = dcb_i; | ||
2377 | break; | ||
2252 | case (IXGBE_FLAG_RSS_ENABLED): | 2378 | case (IXGBE_FLAG_RSS_ENABLED): |
2253 | rss_m = 0xF; | 2379 | rss_m = 0xF; |
2254 | nrq = rss_i; | 2380 | nrq = rss_i; |
@@ -2256,6 +2382,8 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | |||
2256 | break; | 2382 | break; |
2257 | case 0: | 2383 | case 0: |
2258 | default: | 2384 | default: |
2385 | dcb_i = 0; | ||
2386 | dcb_m = 0; | ||
2259 | rss_i = 0; | 2387 | rss_i = 0; |
2260 | rss_m = 0; | 2388 | rss_m = 0; |
2261 | nrq = 1; | 2389 | nrq = 1; |
@@ -2263,6 +2391,12 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | |||
2263 | break; | 2391 | break; |
2264 | } | 2392 | } |
2265 | 2393 | ||
2394 | /* Sanity check: we should never have zero queues */ | ||
2395 | nrq = (nrq ?:1); | ||
2396 | ntq = (ntq ?:1); | ||
2397 | |||
2398 | adapter->ring_feature[RING_F_DCB].indices = dcb_i; | ||
2399 | adapter->ring_feature[RING_F_DCB].mask = dcb_m; | ||
2266 | adapter->ring_feature[RING_F_RSS].indices = rss_i; | 2400 | adapter->ring_feature[RING_F_RSS].indices = rss_i; |
2267 | adapter->ring_feature[RING_F_RSS].mask = rss_m; | 2401 | adapter->ring_feature[RING_F_RSS].mask = rss_m; |
2268 | break; | 2402 | break; |
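A condensed sketch of the 82598 queue-count policy added above: DCB combined with RSS allocates dcb_i * rss_i receive queues (RSS capped at 8 per traffic class), DCB alone allocates one queue per traffic class, RSS alone uses rss_i, and everything else falls back to a single queue. MAX_TX_QUEUES and the helper name here are assumptions for illustration:

    #include <stdio.h>

    #define MAX_TX_QUEUES 32   /* assumption for illustration */

    /* Mirrors the switch above: DCB+RSS => dcb_i * rss_i (RSS capped at 8),
     * DCB only => one queue per traffic class, RSS only => rss_i, else 1. */
    static void count_queues(int dcb_on, int rss_on, int dcb_i, int rss_i,
                             int *nrq, int *ntq)
    {
        if (dcb_on && rss_on) {
            rss_i = rss_i < 8 ? rss_i : 8;
            *nrq = dcb_i * rss_i;
            *ntq = *nrq < MAX_TX_QUEUES ? *nrq : MAX_TX_QUEUES;
        } else if (dcb_on) {
            *nrq = *ntq = dcb_i;
        } else if (rss_on) {
            *nrq = *ntq = rss_i;
        } else {
            *nrq = *ntq = 1;
        }
        if (!*nrq) *nrq = 1;    /* same sanity check as the ?: above */
        if (!*ntq) *ntq = 1;
    }

    int main(void)
    {
        int nrq, ntq;

        count_queues(1, 1, 8, 16, &nrq, &ntq);
        printf("nrq=%d ntq=%d\n", nrq, ntq);   /* nrq=64 ntq=32 */
        return 0;
    }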
@@ -2314,6 +2448,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | |||
2314 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | 2448 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; |
2315 | kfree(adapter->msix_entries); | 2449 | kfree(adapter->msix_entries); |
2316 | adapter->msix_entries = NULL; | 2450 | adapter->msix_entries = NULL; |
2451 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | ||
2317 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | 2452 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; |
2318 | ixgbe_set_num_queues(adapter); | 2453 | ixgbe_set_num_queues(adapter); |
2319 | } else { | 2454 | } else { |
@@ -2333,15 +2468,42 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | |||
2333 | { | 2468 | { |
2334 | int feature_mask = 0, rss_i; | 2469 | int feature_mask = 0, rss_i; |
2335 | int i, txr_idx, rxr_idx; | 2470 | int i, txr_idx, rxr_idx; |
2471 | int dcb_i; | ||
2336 | 2472 | ||
2337 | /* Number of supported queues */ | 2473 | /* Number of supported queues */ |
2338 | switch (adapter->hw.mac.type) { | 2474 | switch (adapter->hw.mac.type) { |
2339 | case ixgbe_mac_82598EB: | 2475 | case ixgbe_mac_82598EB: |
2476 | dcb_i = adapter->ring_feature[RING_F_DCB].indices; | ||
2340 | rss_i = adapter->ring_feature[RING_F_RSS].indices; | 2477 | rss_i = adapter->ring_feature[RING_F_RSS].indices; |
2341 | txr_idx = 0; | 2478 | txr_idx = 0; |
2342 | rxr_idx = 0; | 2479 | rxr_idx = 0; |
2480 | feature_mask |= IXGBE_FLAG_DCB_ENABLED; | ||
2343 | feature_mask |= IXGBE_FLAG_RSS_ENABLED; | 2481 | feature_mask |= IXGBE_FLAG_RSS_ENABLED; |
2344 | switch (adapter->flags & feature_mask) { | 2482 | switch (adapter->flags & feature_mask) { |
2483 | case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED): | ||
2484 | for (i = 0; i < dcb_i; i++) { | ||
2485 | int j; | ||
2486 | /* Rx first */ | ||
2487 | for (j = 0; j < adapter->num_rx_queues; j++) { | ||
2488 | adapter->rx_ring[rxr_idx].reg_idx = | ||
2489 | i << 3 | j; | ||
2490 | rxr_idx++; | ||
2491 | } | ||
2492 | /* Tx now */ | ||
2493 | for (j = 0; j < adapter->num_tx_queues; j++) { | ||
2494 | adapter->tx_ring[txr_idx].reg_idx = | ||
2495 | i << 2 | (j >> 1); | ||
2496 | if (j & 1) | ||
2497 | txr_idx++; | ||
2498 | } | ||
2499 | } | ||
2500 | case (IXGBE_FLAG_DCB_ENABLED): | ||
2501 | /* the number of queues is assumed to be symmetric */ | ||
2502 | for (i = 0; i < dcb_i; i++) { | ||
2503 | adapter->rx_ring[i].reg_idx = i << 3; | ||
2504 | adapter->tx_ring[i].reg_idx = i << 2; | ||
2505 | } | ||
2506 | break; | ||
2345 | case (IXGBE_FLAG_RSS_ENABLED): | 2507 | case (IXGBE_FLAG_RSS_ENABLED): |
2346 | for (i = 0; i < adapter->num_rx_queues; i++) | 2508 | for (i = 0; i < adapter->num_rx_queues; i++) |
2347 | adapter->rx_ring[i].reg_idx = i; | 2509 | adapter->rx_ring[i].reg_idx = i; |
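The register-index caching above encodes the traffic class into the hardware queue number: DCB-only mode maps class i to Rx register index i << 3 and Tx register index i << 2, while the combined DCB+RSS case packs the RSS index into the low bits (i << 3 | j for Rx, i << 2 | (j >> 1) for Tx). A tiny sketch that prints the DCB-only mapping:

    #include <stdio.h>

    /* DCB-only register-index mapping from the hunk above: traffic class i
     * owns Rx register index i << 3 and Tx register index i << 2. */
    int main(void)
    {
        int tc;

        for (tc = 0; tc < 8; tc++)
            printf("TC%d: rx reg_idx=%2d  tx reg_idx=%2d\n",
                   tc, tc << 3, tc << 2);
        return 0;
    }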
@@ -2363,8 +2525,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | |||
2363 | * @adapter: board private structure to initialize | 2525 | * @adapter: board private structure to initialize |
2364 | * | 2526 | * |
2365 | * We allocate one ring per queue at run-time since we don't know the | 2527 | * We allocate one ring per queue at run-time since we don't know the |
2366 | * number of queues at compile-time. The polling_netdev array is | 2528 | * number of queues at compile-time. |
2367 | * intended for Multiqueue, but should work fine with a single queue. | ||
2368 | **/ | 2529 | **/ |
2369 | static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) | 2530 | static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) |
2370 | { | 2531 | { |
@@ -2435,6 +2596,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
2435 | adapter->msix_entries = kcalloc(v_budget, | 2596 | adapter->msix_entries = kcalloc(v_budget, |
2436 | sizeof(struct msix_entry), GFP_KERNEL); | 2597 | sizeof(struct msix_entry), GFP_KERNEL); |
2437 | if (!adapter->msix_entries) { | 2598 | if (!adapter->msix_entries) { |
2599 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | ||
2438 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | 2600 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; |
2439 | ixgbe_set_num_queues(adapter); | 2601 | ixgbe_set_num_queues(adapter); |
2440 | kfree(adapter->tx_ring); | 2602 | kfree(adapter->tx_ring); |
@@ -2475,7 +2637,7 @@ out: | |||
2475 | return err; | 2637 | return err; |
2476 | } | 2638 | } |
2477 | 2639 | ||
2478 | static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) | 2640 | void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) |
2479 | { | 2641 | { |
2480 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 2642 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
2481 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | 2643 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; |
@@ -2499,7 +2661,7 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) | |||
2499 | * - Hardware queue count (num_*_queues) | 2661 | * - Hardware queue count (num_*_queues) |
2500 | * - defined by miscellaneous hardware support/features (RSS, etc.) | 2662 | * - defined by miscellaneous hardware support/features (RSS, etc.) |
2501 | **/ | 2663 | **/ |
2502 | static int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) | 2664 | int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) |
2503 | { | 2665 | { |
2504 | int err; | 2666 | int err; |
2505 | 2667 | ||
@@ -2535,6 +2697,57 @@ err_alloc_queues: | |||
2535 | } | 2697 | } |
2536 | 2698 | ||
2537 | /** | 2699 | /** |
2700 | * ixgbe_sfp_timer - timer that kicks off the worker to find a missing module | ||
2701 | * @data: pointer to our adapter struct | ||
2702 | **/ | ||
2703 | static void ixgbe_sfp_timer(unsigned long data) | ||
2704 | { | ||
2705 | struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; | ||
2706 | |||
2707 | /* Do the sfp_timer outside of interrupt context due to the | ||
2708 | * delays that sfp+ detection requires | ||
2709 | */ | ||
2710 | schedule_work(&adapter->sfp_task); | ||
2711 | } | ||
2712 | |||
2713 | /** | ||
2714 | * ixgbe_sfp_task - worker thread to find a missing module | ||
2715 | * @work: pointer to work_struct containing our data | ||
2716 | **/ | ||
2717 | static void ixgbe_sfp_task(struct work_struct *work) | ||
2718 | { | ||
2719 | struct ixgbe_adapter *adapter = container_of(work, | ||
2720 | struct ixgbe_adapter, | ||
2721 | sfp_task); | ||
2722 | struct ixgbe_hw *hw = &adapter->hw; | ||
2723 | |||
2724 | if ((hw->phy.type == ixgbe_phy_nl) && | ||
2725 | (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { | ||
2726 | s32 ret = hw->phy.ops.identify_sfp(hw); | ||
2727 | if (ret) | ||
2728 | goto reschedule; | ||
2729 | ret = hw->phy.ops.reset(hw); | ||
2730 | if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { | ||
2731 | DPRINTK(PROBE, ERR, "failed to initialize because an " | ||
2732 | "unsupported SFP+ module type was detected.\n" | ||
2733 | "Reload the driver after installing a " | ||
2734 | "supported module.\n"); | ||
2735 | unregister_netdev(adapter->netdev); | ||
2736 | } else { | ||
2737 | DPRINTK(PROBE, INFO, "detected SFP+: %d\n", | ||
2738 | hw->phy.sfp_type); | ||
2739 | } | ||
2740 | /* don't need this routine any more */ | ||
2741 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | ||
2742 | } | ||
2743 | return; | ||
2744 | reschedule: | ||
2745 | if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state)) | ||
2746 | mod_timer(&adapter->sfp_timer, | ||
2747 | round_jiffies(jiffies + (2 * HZ))); | ||
2748 | } | ||
2749 | |||
2750 | /** | ||
2538 | * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) | 2751 | * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) |
2539 | * @adapter: board private structure to initialize | 2752 | * @adapter: board private structure to initialize |
2540 | * | 2753 | * |
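The new sfp_timer/sfp_task pair exists because SFP+ detection needs sleeping I2C reads, which are not allowed in timer (softirq) context: the timer only queues the work item, and the work item re-arms the timer every two seconds until a module shows up. A condensed sketch of that deferral pattern with the timer and workqueue APIs of this kernel era; the struct, stub and init-function names are illustrative, not the driver's (it keeps the timer and work_struct inside struct ixgbe_adapter):

    #include <linux/kernel.h>
    #include <linux/timer.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    struct sfp_watch {
        struct timer_list timer;
        struct work_struct task;
    };

    static bool module_present(struct sfp_watch *w)
    {
        return false;   /* stub: the real work probes the module over I2C */
    }

    static void sfp_task(struct work_struct *work)
    {
        struct sfp_watch *w = container_of(work, struct sfp_watch, task);

        /* process context: sleeping I2C/EEPROM accesses are allowed here */
        if (!module_present(w))
            mod_timer(&w->timer, round_jiffies(jiffies + 2 * HZ));
    }

    static void sfp_timer(unsigned long data)
    {
        struct sfp_watch *w = (struct sfp_watch *)data;

        /* softirq context: do nothing but queue the real work */
        schedule_work(&w->task);
    }

    static void sfp_watch_start(struct sfp_watch *w)
    {
        init_timer(&w->timer);
        w->timer.function = sfp_timer;
        w->timer.data = (unsigned long)w;
        INIT_WORK(&w->task, sfp_task);
        mod_timer(&w->timer, round_jiffies(jiffies + 2 * HZ));
    }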
@@ -2547,6 +2760,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
2547 | struct ixgbe_hw *hw = &adapter->hw; | 2760 | struct ixgbe_hw *hw = &adapter->hw; |
2548 | struct pci_dev *pdev = adapter->pdev; | 2761 | struct pci_dev *pdev = adapter->pdev; |
2549 | unsigned int rss; | 2762 | unsigned int rss; |
2763 | #ifdef CONFIG_IXGBE_DCB | ||
2764 | int j; | ||
2765 | struct tc_configuration *tc; | ||
2766 | #endif | ||
2550 | 2767 | ||
2551 | /* PCI config space info */ | 2768 | /* PCI config space info */ |
2552 | 2769 | ||
@@ -2560,6 +2777,30 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
2560 | rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); | 2777 | rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); |
2561 | adapter->ring_feature[RING_F_RSS].indices = rss; | 2778 | adapter->ring_feature[RING_F_RSS].indices = rss; |
2562 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; | 2779 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; |
2780 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; | ||
2781 | |||
2782 | #ifdef CONFIG_IXGBE_DCB | ||
2783 | /* Configure DCB traffic classes */ | ||
2784 | for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { | ||
2785 | tc = &adapter->dcb_cfg.tc_config[j]; | ||
2786 | tc->path[DCB_TX_CONFIG].bwg_id = 0; | ||
2787 | tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); | ||
2788 | tc->path[DCB_RX_CONFIG].bwg_id = 0; | ||
2789 | tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); | ||
2790 | tc->dcb_pfc = pfc_disabled; | ||
2791 | } | ||
2792 | adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; | ||
2793 | adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; | ||
2794 | adapter->dcb_cfg.rx_pba_cfg = pba_equal; | ||
2795 | adapter->dcb_cfg.round_robin_enable = false; | ||
2796 | adapter->dcb_set_bitmap = 0x00; | ||
2797 | ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, | ||
2798 | adapter->ring_feature[RING_F_DCB].indices); | ||
2799 | |||
2800 | #endif | ||
2801 | if (hw->mac.ops.get_media_type && | ||
2802 | (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) | ||
2803 | adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; | ||
2563 | 2804 | ||
2564 | /* default flow control settings */ | 2805 | /* default flow control settings */ |
2565 | hw->fc.original_type = ixgbe_fc_none; | 2806 | hw->fc.original_type = ixgbe_fc_none; |
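The DCB defaults installed in ixgbe_sw_init() above give each of the eight traffic classes 12 + (j & 1) percent of bandwidth group 0, i.e. alternating 12% and 13%, which is why group 0 can be assigned the full 100%. A quick check of that arithmetic:

    #include <stdio.h>

    /* The default split above: 12 + (j & 1) percent for eight classes,
     * i.e. 12, 13, 12, 13, ... which must total the 100% given to BWG 0. */
    int main(void)
    {
        int j, sum = 0;

        for (j = 0; j < 8; j++)
            sum += 12 + (j & 1);
        printf("total = %d%%\n", sum);   /* total = 100% */
        return 0;
    }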
@@ -2934,11 +3175,16 @@ static int ixgbe_close(struct net_device *netdev) | |||
2934 | * @adapter: private struct | 3175 | * @adapter: private struct |
2935 | * helper function to napi_add each possible q_vector->napi | 3176 | * helper function to napi_add each possible q_vector->napi |
2936 | */ | 3177 | */ |
2937 | static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) | 3178 | void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) |
2938 | { | 3179 | { |
2939 | int q_idx, q_vectors; | 3180 | int q_idx, q_vectors; |
3181 | struct net_device *netdev = adapter->netdev; | ||
2940 | int (*poll)(struct napi_struct *, int); | 3182 | int (*poll)(struct napi_struct *, int); |
2941 | 3183 | ||
3184 | /* check if we already have our netdev->napi_list populated */ | ||
3185 | if (&netdev->napi_list != netdev->napi_list.next) | ||
3186 | return; | ||
3187 | |||
2942 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 3188 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
2943 | poll = &ixgbe_clean_rxonly; | 3189 | poll = &ixgbe_clean_rxonly; |
2944 | /* Only enable as many vectors as we have rx queues. */ | 3190 | /* Only enable as many vectors as we have rx queues. */ |
@@ -2955,7 +3201,7 @@ static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) | |||
2955 | } | 3201 | } |
2956 | } | 3202 | } |
2957 | 3203 | ||
2958 | static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter) | 3204 | void ixgbe_napi_del_all(struct ixgbe_adapter *adapter) |
2959 | { | 3205 | { |
2960 | int q_idx; | 3206 | int q_idx; |
2961 | int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 3207 | int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
@@ -3032,6 +3278,7 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3032 | } | 3278 | } |
3033 | ixgbe_reset_interrupt_capability(adapter); | 3279 | ixgbe_reset_interrupt_capability(adapter); |
3034 | ixgbe_napi_del_all(adapter); | 3280 | ixgbe_napi_del_all(adapter); |
3281 | INIT_LIST_HEAD(&netdev->napi_list); | ||
3035 | kfree(adapter->tx_ring); | 3282 | kfree(adapter->tx_ring); |
3036 | kfree(adapter->rx_ring); | 3283 | kfree(adapter->rx_ring); |
3037 | 3284 | ||
@@ -3076,6 +3323,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
3076 | adapter->stats.mpc[i] += mpc; | 3323 | adapter->stats.mpc[i] += mpc; |
3077 | total_mpc += adapter->stats.mpc[i]; | 3324 | total_mpc += adapter->stats.mpc[i]; |
3078 | adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); | 3325 | adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); |
3326 | adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); | ||
3327 | adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); | ||
3328 | adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); | ||
3329 | adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); | ||
3330 | adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, | ||
3331 | IXGBE_PXONRXC(i)); | ||
3332 | adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw, | ||
3333 | IXGBE_PXONTXC(i)); | ||
3334 | adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, | ||
3335 | IXGBE_PXOFFRXC(i)); | ||
3336 | adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw, | ||
3337 | IXGBE_PXOFFTXC(i)); | ||
3079 | } | 3338 | } |
3080 | adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); | 3339 | adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); |
3081 | /* work around hardware counting issue */ | 3340 | /* work around hardware counting issue */ |
@@ -3204,15 +3463,16 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
3204 | u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); | 3463 | u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); |
3205 | #define FLOW_RX (frctl & IXGBE_FCTRL_RFCE) | 3464 | #define FLOW_RX (frctl & IXGBE_FCTRL_RFCE) |
3206 | #define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X) | 3465 | #define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X) |
3207 | DPRINTK(LINK, INFO, "NIC Link is Up %s, " | 3466 | printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, " |
3208 | "Flow Control: %s\n", | 3467 | "Flow Control: %s\n", |
3209 | (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? | 3468 | netdev->name, |
3210 | "10 Gbps" : | 3469 | (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? |
3211 | (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? | 3470 | "10 Gbps" : |
3212 | "1 Gbps" : "unknown speed")), | 3471 | (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? |
3213 | ((FLOW_RX && FLOW_TX) ? "RX/TX" : | 3472 | "1 Gbps" : "unknown speed")), |
3214 | (FLOW_RX ? "RX" : | 3473 | ((FLOW_RX && FLOW_TX) ? "RX/TX" : |
3215 | (FLOW_TX ? "TX" : "None")))); | 3474 | (FLOW_RX ? "RX" : |
3475 | (FLOW_TX ? "TX" : "None")))); | ||
3216 | 3476 | ||
3217 | netif_carrier_on(netdev); | 3477 | netif_carrier_on(netdev); |
3218 | netif_tx_wake_all_queues(netdev); | 3478 | netif_tx_wake_all_queues(netdev); |
@@ -3224,7 +3484,8 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
3224 | adapter->link_up = false; | 3484 | adapter->link_up = false; |
3225 | adapter->link_speed = 0; | 3485 | adapter->link_speed = 0; |
3226 | if (netif_carrier_ok(netdev)) { | 3486 | if (netif_carrier_ok(netdev)) { |
3227 | DPRINTK(LINK, INFO, "NIC Link is Down\n"); | 3487 | printk(KERN_INFO "ixgbe: %s NIC Link is Down\n", |
3488 | netdev->name); | ||
3228 | netif_carrier_off(netdev); | 3489 | netif_carrier_off(netdev); |
3229 | netif_tx_stop_all_queues(netdev); | 3490 | netif_tx_stop_all_queues(netdev); |
3230 | } | 3491 | } |
@@ -3573,6 +3834,14 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3573 | 3834 | ||
3574 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { | 3835 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { |
3575 | tx_flags |= vlan_tx_tag_get(skb); | 3836 | tx_flags |= vlan_tx_tag_get(skb); |
3837 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
3838 | tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; | ||
3839 | tx_flags |= (skb->queue_mapping << 13); | ||
3840 | } | ||
3841 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | ||
3842 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | ||
3843 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
3844 | tx_flags |= (skb->queue_mapping << 13); | ||
3576 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | 3845 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; |
3577 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | 3846 | tx_flags |= IXGBE_TX_FLAGS_VLAN; |
3578 | } | 3847 | } |
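With DCB enabled, the transmit path above overwrites the 802.1p priority of the outgoing VLAN tag with the skb's queue mapping; in an 802.1Q TCI the PCP field occupies bits 15:13, hence the shift by 13 and the priority mask. A small sketch of that bit manipulation on a bare 16-bit TCI; treating the mask as bits 15:13 of the TCI is an assumption derived from the shift used above:

    #include <stdio.h>
    #include <stdint.h>

    #define VLAN_PRIO_MASK 0xe000u   /* PCP bits 15:13 of the 802.1Q TCI */

    /* Replace the 802.1p priority of a TCI with a Tx queue index, the same
     * bit-twiddling the DCB transmit path performs before shifting the tag
     * into the upper half of tx_flags. */
    static uint16_t set_prio(uint16_t tci, unsigned int queue)
    {
        tci &= (uint16_t)~VLAN_PRIO_MASK;
        tci |= (uint16_t)(queue << 13);
        return tci;
    }

    int main(void)
    {
        uint16_t tci = 0x0064;                    /* VLAN 100, priority 0 */

        printf("0x%04x\n", set_prio(tci, 5));     /* 0xa064: VLAN 100, priority 5 */
        return 0;
    }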
@@ -3687,9 +3956,31 @@ static int ixgbe_link_config(struct ixgbe_hw *hw) | |||
3687 | /* must always autoneg for both 1G and 10G link */ | 3956 | /* must always autoneg for both 1G and 10G link */ |
3688 | hw->mac.autoneg = true; | 3957 | hw->mac.autoneg = true; |
3689 | 3958 | ||
3959 | if ((hw->mac.type == ixgbe_mac_82598EB) && | ||
3960 | (hw->phy.media_type == ixgbe_media_type_copper)) | ||
3961 | autoneg = IXGBE_LINK_SPEED_82598_AUTONEG; | ||
3962 | |||
3690 | return hw->mac.ops.setup_link_speed(hw, autoneg, true, true); | 3963 | return hw->mac.ops.setup_link_speed(hw, autoneg, true, true); |
3691 | } | 3964 | } |
3692 | 3965 | ||
3966 | static const struct net_device_ops ixgbe_netdev_ops = { | ||
3967 | .ndo_open = ixgbe_open, | ||
3968 | .ndo_stop = ixgbe_close, | ||
3969 | .ndo_start_xmit = ixgbe_xmit_frame, | ||
3970 | .ndo_get_stats = ixgbe_get_stats, | ||
3971 | .ndo_set_multicast_list = ixgbe_set_rx_mode, | ||
3972 | .ndo_validate_addr = eth_validate_addr, | ||
3973 | .ndo_set_mac_address = ixgbe_set_mac, | ||
3974 | .ndo_change_mtu = ixgbe_change_mtu, | ||
3975 | .ndo_tx_timeout = ixgbe_tx_timeout, | ||
3976 | .ndo_vlan_rx_register = ixgbe_vlan_rx_register, | ||
3977 | .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, | ||
3978 | .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, | ||
3979 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
3980 | .ndo_poll_controller = ixgbe_netpoll, | ||
3981 | #endif | ||
3982 | }; | ||
3983 | |||
3693 | /** | 3984 | /** |
3694 | * ixgbe_probe - Device Initialization Routine | 3985 | * ixgbe_probe - Device Initialization Routine |
3695 | * @pdev: PCI device information struct | 3986 | * @pdev: PCI device information struct |
@@ -3739,6 +4030,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3739 | goto err_pci_reg; | 4030 | goto err_pci_reg; |
3740 | } | 4031 | } |
3741 | 4032 | ||
4033 | err = pci_enable_pcie_error_reporting(pdev); | ||
4034 | if (err) { | ||
4035 | dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed " | ||
4036 | "0x%x\n", err); | ||
4037 | /* non-fatal, continue */ | ||
4038 | } | ||
4039 | |||
3742 | pci_set_master(pdev); | 4040 | pci_set_master(pdev); |
3743 | pci_save_state(pdev); | 4041 | pci_save_state(pdev); |
3744 | 4042 | ||
@@ -3771,23 +4069,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3771 | continue; | 4069 | continue; |
3772 | } | 4070 | } |
3773 | 4071 | ||
3774 | netdev->open = &ixgbe_open; | 4072 | netdev->netdev_ops = &ixgbe_netdev_ops; |
3775 | netdev->stop = &ixgbe_close; | ||
3776 | netdev->hard_start_xmit = &ixgbe_xmit_frame; | ||
3777 | netdev->get_stats = &ixgbe_get_stats; | ||
3778 | netdev->set_rx_mode = &ixgbe_set_rx_mode; | ||
3779 | netdev->set_multicast_list = &ixgbe_set_rx_mode; | ||
3780 | netdev->set_mac_address = &ixgbe_set_mac; | ||
3781 | netdev->change_mtu = &ixgbe_change_mtu; | ||
3782 | ixgbe_set_ethtool_ops(netdev); | 4073 | ixgbe_set_ethtool_ops(netdev); |
3783 | netdev->tx_timeout = &ixgbe_tx_timeout; | ||
3784 | netdev->watchdog_timeo = 5 * HZ; | 4074 | netdev->watchdog_timeo = 5 * HZ; |
3785 | netdev->vlan_rx_register = ixgbe_vlan_rx_register; | ||
3786 | netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid; | ||
3787 | netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid; | ||
3788 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
3789 | netdev->poll_controller = ixgbe_netpoll; | ||
3790 | #endif | ||
3791 | strcpy(netdev->name, pci_name(pdev)); | 4075 | strcpy(netdev->name, pci_name(pdev)); |
3792 | 4076 | ||
3793 | adapter->bd_number = cards_found; | 4077 | adapter->bd_number = cards_found; |
@@ -3805,11 +4089,31 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3805 | 4089 | ||
3806 | /* PHY */ | 4090 | /* PHY */ |
3807 | memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); | 4091 | memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); |
3808 | /* phy->sfp_type = ixgbe_sfp_type_unknown; */ | 4092 | hw->phy.sfp_type = ixgbe_sfp_type_unknown; |
4093 | |||
4094 | /* set up this timer and work struct before calling get_invariants | ||
4095 | * which might start the timer | ||
4096 | */ | ||
4097 | init_timer(&adapter->sfp_timer); | ||
4098 | adapter->sfp_timer.function = &ixgbe_sfp_timer; | ||
4099 | adapter->sfp_timer.data = (unsigned long) adapter; | ||
4100 | |||
4101 | INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task); | ||
3809 | 4102 | ||
3810 | err = ii->get_invariants(hw); | 4103 | err = ii->get_invariants(hw); |
3811 | if (err) | 4104 | if (err == IXGBE_ERR_SFP_NOT_PRESENT) { |
4105 | /* start a kernel thread to watch for a module to arrive */ | ||
4106 | set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | ||
4107 | mod_timer(&adapter->sfp_timer, | ||
4108 | round_jiffies(jiffies + (2 * HZ))); | ||
4109 | err = 0; | ||
4110 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | ||
4111 | DPRINTK(PROBE, ERR, "failed to load because an " | ||
4112 | "unsupported SFP+ module type was detected.\n"); | ||
3812 | goto err_hw_init; | 4113 | goto err_hw_init; |
4114 | } else if (err) { | ||
4115 | goto err_hw_init; | ||
4116 | } | ||
3813 | 4117 | ||
3814 | /* setup the private structure */ | 4118 | /* setup the private structure */ |
3815 | err = ixgbe_sw_init(adapter); | 4119 | err = ixgbe_sw_init(adapter); |
@@ -3839,6 +4143,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3839 | netdev->vlan_features |= NETIF_F_IP_CSUM; | 4143 | netdev->vlan_features |= NETIF_F_IP_CSUM; |
3840 | netdev->vlan_features |= NETIF_F_SG; | 4144 | netdev->vlan_features |= NETIF_F_SG; |
3841 | 4145 | ||
4146 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | ||
4147 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | ||
4148 | |||
4149 | #ifdef CONFIG_IXGBE_DCB | ||
4150 | netdev->dcbnl_ops = &dcbnl_ops; | ||
4151 | #endif | ||
4152 | |||
3842 | if (pci_using_dac) | 4153 | if (pci_using_dac) |
3843 | netdev->features |= NETIF_F_HIGHDMA; | 4154 | netdev->features |= NETIF_F_HIGHDMA; |
3844 | 4155 | ||
@@ -3873,8 +4184,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3873 | pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status); | 4184 | pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status); |
3874 | link_speed = link_status & IXGBE_PCI_LINK_SPEED; | 4185 | link_speed = link_status & IXGBE_PCI_LINK_SPEED; |
3875 | link_width = link_status & IXGBE_PCI_LINK_WIDTH; | 4186 | link_width = link_status & IXGBE_PCI_LINK_WIDTH; |
3876 | dev_info(&pdev->dev, "(PCI Express:%s:%s) " | 4187 | dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n", |
3877 | "%02x:%02x:%02x:%02x:%02x:%02x\n", | ||
3878 | ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : | 4188 | ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : |
3879 | (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : | 4189 | (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : |
3880 | "Unknown"), | 4190 | "Unknown"), |
@@ -3883,8 +4193,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3883 | (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : | 4193 | (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : |
3884 | (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" : | 4194 | (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" : |
3885 | "Unknown"), | 4195 | "Unknown"), |
3886 | netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], | 4196 | netdev->dev_addr); |
3887 | netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); | ||
3888 | ixgbe_read_pba_num_generic(hw, &part_num); | 4197 | ixgbe_read_pba_num_generic(hw, &part_num); |
3889 | dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", | 4198 | dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", |
3890 | hw->mac.type, hw->phy.type, | 4199 | hw->mac.type, hw->phy.type, |
@@ -3911,8 +4220,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3911 | netif_carrier_off(netdev); | 4220 | netif_carrier_off(netdev); |
3912 | netif_tx_stop_all_queues(netdev); | 4221 | netif_tx_stop_all_queues(netdev); |
3913 | 4222 | ||
3914 | ixgbe_napi_add_all(adapter); | ||
3915 | |||
3916 | strcpy(netdev->name, "eth%d"); | 4223 | strcpy(netdev->name, "eth%d"); |
3917 | err = register_netdev(netdev); | 4224 | err = register_netdev(netdev); |
3918 | if (err) | 4225 | if (err) |
@@ -3938,6 +4245,9 @@ err_hw_init: | |||
3938 | err_sw_init: | 4245 | err_sw_init: |
3939 | ixgbe_reset_interrupt_capability(adapter); | 4246 | ixgbe_reset_interrupt_capability(adapter); |
3940 | err_eeprom: | 4247 | err_eeprom: |
4248 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | ||
4249 | del_timer_sync(&adapter->sfp_timer); | ||
4250 | cancel_work_sync(&adapter->sfp_task); | ||
3941 | iounmap(hw->hw_addr); | 4251 | iounmap(hw->hw_addr); |
3942 | err_ioremap: | 4252 | err_ioremap: |
3943 | free_netdev(netdev); | 4253 | free_netdev(netdev); |
@@ -3962,10 +4272,18 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
3962 | { | 4272 | { |
3963 | struct net_device *netdev = pci_get_drvdata(pdev); | 4273 | struct net_device *netdev = pci_get_drvdata(pdev); |
3964 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 4274 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
4275 | int err; | ||
3965 | 4276 | ||
3966 | set_bit(__IXGBE_DOWN, &adapter->state); | 4277 | set_bit(__IXGBE_DOWN, &adapter->state); |
4278 | /* clear the module not found bit to make sure the worker won't | ||
4279 | * reschedule | ||
4280 | */ | ||
4281 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | ||
3967 | del_timer_sync(&adapter->watchdog_timer); | 4282 | del_timer_sync(&adapter->watchdog_timer); |
3968 | 4283 | ||
4284 | del_timer_sync(&adapter->sfp_timer); | ||
4285 | cancel_work_sync(&adapter->watchdog_task); | ||
4286 | cancel_work_sync(&adapter->sfp_task); | ||
3969 | flush_scheduled_work(); | 4287 | flush_scheduled_work(); |
3970 | 4288 | ||
3971 | #ifdef CONFIG_IXGBE_DCA | 4289 | #ifdef CONFIG_IXGBE_DCA |
@@ -3976,7 +4294,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
3976 | } | 4294 | } |
3977 | 4295 | ||
3978 | #endif | 4296 | #endif |
3979 | unregister_netdev(netdev); | 4297 | if (netdev->reg_state == NETREG_REGISTERED) |
4298 | unregister_netdev(netdev); | ||
3980 | 4299 | ||
3981 | ixgbe_reset_interrupt_capability(adapter); | 4300 | ixgbe_reset_interrupt_capability(adapter); |
3982 | 4301 | ||
@@ -3986,12 +4305,16 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
3986 | pci_release_regions(pdev); | 4305 | pci_release_regions(pdev); |
3987 | 4306 | ||
3988 | DPRINTK(PROBE, INFO, "complete\n"); | 4307 | DPRINTK(PROBE, INFO, "complete\n"); |
3989 | ixgbe_napi_del_all(adapter); | ||
3990 | kfree(adapter->tx_ring); | 4308 | kfree(adapter->tx_ring); |
3991 | kfree(adapter->rx_ring); | 4309 | kfree(adapter->rx_ring); |
3992 | 4310 | ||
3993 | free_netdev(netdev); | 4311 | free_netdev(netdev); |
3994 | 4312 | ||
4313 | err = pci_disable_pcie_error_reporting(pdev); | ||
4314 | if (err) | ||
4315 | dev_err(&pdev->dev, | ||
4316 | "pci_disable_pcie_error_reporting failed 0x%x\n", err); | ||
4317 | |||
3995 | pci_disable_device(pdev); | 4318 | pci_disable_device(pdev); |
3996 | } | 4319 | } |
3997 | 4320 | ||
@@ -4007,7 +4330,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, | |||
4007 | pci_channel_state_t state) | 4330 | pci_channel_state_t state) |
4008 | { | 4331 | { |
4009 | struct net_device *netdev = pci_get_drvdata(pdev); | 4332 | struct net_device *netdev = pci_get_drvdata(pdev); |
4010 | struct ixgbe_adapter *adapter = netdev->priv; | 4333 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
4011 | 4334 | ||
4012 | netif_device_detach(netdev); | 4335 | netif_device_detach(netdev); |
4013 | 4336 | ||
@@ -4028,22 +4351,34 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, | |||
4028 | static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) | 4351 | static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) |
4029 | { | 4352 | { |
4030 | struct net_device *netdev = pci_get_drvdata(pdev); | 4353 | struct net_device *netdev = pci_get_drvdata(pdev); |
4031 | struct ixgbe_adapter *adapter = netdev->priv; | 4354 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
4355 | pci_ers_result_t result; | ||
4356 | int err; | ||
4032 | 4357 | ||
4033 | if (pci_enable_device(pdev)) { | 4358 | if (pci_enable_device(pdev)) { |
4034 | DPRINTK(PROBE, ERR, | 4359 | DPRINTK(PROBE, ERR, |
4035 | "Cannot re-enable PCI device after reset.\n"); | 4360 | "Cannot re-enable PCI device after reset.\n"); |
4036 | return PCI_ERS_RESULT_DISCONNECT; | 4361 | result = PCI_ERS_RESULT_DISCONNECT; |
4037 | } | 4362 | } else { |
4038 | pci_set_master(pdev); | 4363 | pci_set_master(pdev); |
4039 | pci_restore_state(pdev); | 4364 | pci_restore_state(pdev); |
4040 | 4365 | ||
4041 | pci_enable_wake(pdev, PCI_D3hot, 0); | 4366 | pci_enable_wake(pdev, PCI_D3hot, 0); |
4042 | pci_enable_wake(pdev, PCI_D3cold, 0); | 4367 | pci_enable_wake(pdev, PCI_D3cold, 0); |
4043 | 4368 | ||
4044 | ixgbe_reset(adapter); | 4369 | ixgbe_reset(adapter); |
4370 | |||
4371 | result = PCI_ERS_RESULT_RECOVERED; | ||
4372 | } | ||
4045 | 4373 | ||
4046 | return PCI_ERS_RESULT_RECOVERED; | 4374 | err = pci_cleanup_aer_uncorrect_error_status(pdev); |
4375 | if (err) { | ||
4376 | dev_err(&pdev->dev, | ||
4377 | "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err); | ||
4378 | /* non-fatal, continue */ | ||
4379 | } | ||
4380 | |||
4381 | return result; | ||
4047 | } | 4382 | } |
4048 | 4383 | ||
4049 | /** | 4384 | /** |
@@ -4056,7 +4391,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) | |||
4056 | static void ixgbe_io_resume(struct pci_dev *pdev) | 4391 | static void ixgbe_io_resume(struct pci_dev *pdev) |
4057 | { | 4392 | { |
4058 | struct net_device *netdev = pci_get_drvdata(pdev); | 4393 | struct net_device *netdev = pci_get_drvdata(pdev); |
4059 | struct ixgbe_adapter *adapter = netdev->priv; | 4394 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
4060 | 4395 | ||
4061 | if (netif_running(netdev)) { | 4396 | if (netif_running(netdev)) { |
4062 | if (ixgbe_up(adapter)) { | 4397 | if (ixgbe_up(adapter)) { |
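The AER additions in this patch pair pci_enable_pcie_error_reporting() at probe with pci_cleanup_aer_uncorrect_error_status() in slot_reset, so a recovered uncorrectable error does not leave stale status behind. The three callbacks shown above are invoked by the PCI core through the driver's error-handler table; a sketch of that wiring, where only the callback names come from this file and the table and driver-struct layout are the generic PCI AER pattern:

    #include <linux/pci.h>

    /* Generic wiring sketch: the error-handler table is attached to the
     * pci_driver so the core can call the callbacks defined above. */
    static struct pci_error_handlers ixgbe_err_handler = {
        .error_detected = ixgbe_io_error_detected,
        .slot_reset     = ixgbe_io_slot_reset,
        .resume         = ixgbe_io_resume,
    };

    static struct pci_driver ixgbe_driver = {
        /* .name, .id_table, .probe, .remove, ... elided */
        .err_handler = &ixgbe_err_handler,
    };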
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 764035a8c9a1..5a8669aedf64 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c | |||
@@ -121,9 +121,15 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) | |||
121 | enum ixgbe_phy_type phy_type; | 121 | enum ixgbe_phy_type phy_type; |
122 | 122 | ||
123 | switch (phy_id) { | 123 | switch (phy_id) { |
124 | case TN1010_PHY_ID: | ||
125 | phy_type = ixgbe_phy_tn; | ||
126 | break; | ||
124 | case QT2022_PHY_ID: | 127 | case QT2022_PHY_ID: |
125 | phy_type = ixgbe_phy_qt; | 128 | phy_type = ixgbe_phy_qt; |
126 | break; | 129 | break; |
130 | case ATH_PHY_ID: | ||
131 | phy_type = ixgbe_phy_nl; | ||
132 | break; | ||
127 | default: | 133 | default: |
128 | phy_type = ixgbe_phy_unknown; | 134 | phy_type = ixgbe_phy_unknown; |
129 | break; | 135 | break; |
@@ -426,3 +432,323 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, | |||
426 | return 0; | 432 | return 0; |
427 | } | 433 | } |
428 | 434 | ||
435 | /** | ||
436 | * ixgbe_reset_phy_nl - Performs a PHY reset | ||
437 | * @hw: pointer to hardware structure | ||
438 | **/ | ||
439 | s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) | ||
440 | { | ||
441 | u16 phy_offset, control, eword, edata, block_crc; | ||
442 | bool end_data = false; | ||
443 | u16 list_offset, data_offset; | ||
444 | u16 phy_data = 0; | ||
445 | s32 ret_val = 0; | ||
446 | u32 i; | ||
447 | |||
448 | hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, | ||
449 | IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); | ||
450 | |||
451 | /* reset the PHY and poll for completion */ | ||
452 | hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, | ||
453 | IXGBE_MDIO_PHY_XS_DEV_TYPE, | ||
454 | (phy_data | IXGBE_MDIO_PHY_XS_RESET)); | ||
455 | |||
456 | for (i = 0; i < 100; i++) { | ||
457 | hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, | ||
458 | IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); | ||
459 | if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0) | ||
460 | break; | ||
461 | msleep(10); | ||
462 | } | ||
463 | |||
464 | if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) { | ||
465 | hw_dbg(hw, "PHY reset did not complete.\n"); | ||
466 | ret_val = IXGBE_ERR_PHY; | ||
467 | goto out; | ||
468 | } | ||
469 | |||
470 | /* Get init offsets */ | ||
471 | ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, | ||
472 | &data_offset); | ||
473 | if (ret_val != 0) | ||
474 | goto out; | ||
475 | |||
476 | ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); | ||
477 | data_offset++; | ||
478 | while (!end_data) { | ||
479 | /* | ||
480 | * Read control word from PHY init contents offset | ||
481 | */ | ||
482 | ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); | ||
483 | control = (eword & IXGBE_CONTROL_MASK_NL) >> | ||
484 | IXGBE_CONTROL_SHIFT_NL; | ||
485 | edata = eword & IXGBE_DATA_MASK_NL; | ||
486 | switch (control) { | ||
487 | case IXGBE_DELAY_NL: | ||
488 | data_offset++; | ||
489 | hw_dbg(hw, "DELAY: %d MS\n", edata); | ||
490 | msleep(edata); | ||
491 | break; | ||
492 | case IXGBE_DATA_NL: | ||
493 | hw_dbg(hw, "DATA: \n"); | ||
494 | data_offset++; | ||
495 | hw->eeprom.ops.read(hw, data_offset++, | ||
496 | &phy_offset); | ||
497 | for (i = 0; i < edata; i++) { | ||
498 | hw->eeprom.ops.read(hw, data_offset, &eword); | ||
499 | hw->phy.ops.write_reg(hw, phy_offset, | ||
500 | IXGBE_TWINAX_DEV, eword); | ||
501 | hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, | ||
502 | phy_offset); | ||
503 | data_offset++; | ||
504 | phy_offset++; | ||
505 | } | ||
506 | break; | ||
507 | case IXGBE_CONTROL_NL: | ||
508 | data_offset++; | ||
509 | hw_dbg(hw, "CONTROL: \n"); | ||
510 | if (edata == IXGBE_CONTROL_EOL_NL) { | ||
511 | hw_dbg(hw, "EOL\n"); | ||
512 | end_data = true; | ||
513 | } else if (edata == IXGBE_CONTROL_SOL_NL) { | ||
514 | hw_dbg(hw, "SOL\n"); | ||
515 | } else { | ||
516 | hw_dbg(hw, "Bad control value\n"); | ||
517 | ret_val = IXGBE_ERR_PHY; | ||
518 | goto out; | ||
519 | } | ||
520 | break; | ||
521 | default: | ||
522 | hw_dbg(hw, "Bad control type\n"); | ||
523 | ret_val = IXGBE_ERR_PHY; | ||
524 | goto out; | ||
525 | } | ||
526 | } | ||
527 | |||
528 | out: | ||
529 | return ret_val; | ||
530 | } | ||
531 | |||
532 | /** | ||
533 | * ixgbe_identify_sfp_module_generic - Identifies SFP module and assigns | ||
534 | * the PHY type. | ||
535 | * @hw: pointer to hardware structure | ||
536 | * | ||
537 | * Searches for and identifies the SFP module. Assigns appropriate PHY type. | ||
538 | **/ | ||
539 | s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) | ||
540 | { | ||
541 | s32 status = IXGBE_ERR_PHY_ADDR_INVALID; | ||
542 | u32 vendor_oui = 0; | ||
543 | u8 identifier = 0; | ||
544 | u8 comp_codes_1g = 0; | ||
545 | u8 comp_codes_10g = 0; | ||
546 | u8 oui_bytes[4] = {0, 0, 0, 0}; | ||
547 | u8 transmission_media = 0; | ||
548 | |||
549 | status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, | ||
550 | &identifier); | ||
551 | |||
552 | if (status == IXGBE_ERR_SFP_NOT_PRESENT) { | ||
553 | hw->phy.sfp_type = ixgbe_sfp_type_not_present; | ||
554 | goto out; | ||
555 | } | ||
556 | |||
557 | if (identifier == IXGBE_SFF_IDENTIFIER_SFP) { | ||
558 | hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES, | ||
559 | &comp_codes_1g); | ||
560 | hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, | ||
561 | &comp_codes_10g); | ||
562 | hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_TRANSMISSION_MEDIA, | ||
563 | &transmission_media); | ||
564 | |||
565 | /* ID Module | ||
566 | * ========= | ||
567 | * 0 SFP_DA_CU | ||
568 | * 1 SFP_SR | ||
569 | * 2 SFP_LR | ||
570 | */ | ||
571 | if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE) | ||
572 | hw->phy.sfp_type = ixgbe_sfp_type_da_cu; | ||
573 | else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) | ||
574 | hw->phy.sfp_type = ixgbe_sfp_type_sr; | ||
575 | else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) | ||
576 | hw->phy.sfp_type = ixgbe_sfp_type_lr; | ||
577 | else | ||
578 | hw->phy.sfp_type = ixgbe_sfp_type_unknown; | ||
579 | |||
580 | /* Determine PHY vendor */ | ||
581 | if (hw->phy.type == ixgbe_phy_unknown) { | ||
582 | hw->phy.id = identifier; | ||
583 | hw->phy.ops.read_i2c_eeprom(hw, | ||
584 | IXGBE_SFF_VENDOR_OUI_BYTE0, | ||
585 | &oui_bytes[0]); | ||
586 | hw->phy.ops.read_i2c_eeprom(hw, | ||
587 | IXGBE_SFF_VENDOR_OUI_BYTE1, | ||
588 | &oui_bytes[1]); | ||
589 | hw->phy.ops.read_i2c_eeprom(hw, | ||
590 | IXGBE_SFF_VENDOR_OUI_BYTE2, | ||
591 | &oui_bytes[2]); | ||
592 | |||
593 | vendor_oui = | ||
594 | ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | | ||
595 | (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | | ||
596 | (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); | ||
597 | |||
598 | switch (vendor_oui) { | ||
599 | case IXGBE_SFF_VENDOR_OUI_TYCO: | ||
600 | if (transmission_media & | ||
601 | IXGBE_SFF_TWIN_AX_CAPABLE) | ||
602 | hw->phy.type = ixgbe_phy_tw_tyco; | ||
603 | break; | ||
604 | case IXGBE_SFF_VENDOR_OUI_FTL: | ||
605 | hw->phy.type = ixgbe_phy_sfp_ftl; | ||
606 | break; | ||
607 | case IXGBE_SFF_VENDOR_OUI_AVAGO: | ||
608 | hw->phy.type = ixgbe_phy_sfp_avago; | ||
609 | break; | ||
610 | default: | ||
611 | if (transmission_media & | ||
612 | IXGBE_SFF_TWIN_AX_CAPABLE) | ||
613 | hw->phy.type = ixgbe_phy_tw_unknown; | ||
614 | else | ||
615 | hw->phy.type = ixgbe_phy_sfp_unknown; | ||
616 | break; | ||
617 | } | ||
618 | } | ||
619 | status = 0; | ||
620 | } | ||
621 | |||
622 | out: | ||
623 | return status; | ||
624 | } | ||
625 | |||
626 | /** | ||
627 | * ixgbe_get_sfp_init_sequence_offsets - Checks the MAC's EEPROM to see | ||
628 | * if it supports a given SFP+ module type; if so, it returns the offsets to the | ||
629 | * PHY init sequence block. | ||
630 | * @hw: pointer to hardware structure | ||
631 | * @list_offset: offset to the SFP ID list | ||
632 | * @data_offset: offset to the SFP data block | ||
633 | **/ | ||
634 | s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, | ||
635 | u16 *list_offset, | ||
636 | u16 *data_offset) | ||
637 | { | ||
638 | u16 sfp_id; | ||
639 | |||
640 | if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) | ||
641 | return IXGBE_ERR_SFP_NOT_SUPPORTED; | ||
642 | |||
643 | if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) | ||
644 | return IXGBE_ERR_SFP_NOT_PRESENT; | ||
645 | |||
646 | if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && | ||
647 | (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) | ||
648 | return IXGBE_ERR_SFP_NOT_SUPPORTED; | ||
649 | |||
650 | /* Read offset to PHY init contents */ | ||
651 | hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset); | ||
652 | |||
653 | if ((!*list_offset) || (*list_offset == 0xFFFF)) | ||
654 | return IXGBE_ERR_PHY; | ||
655 | |||
656 | /* Shift offset to first ID word */ | ||
657 | (*list_offset)++; | ||
658 | |||
659 | /* | ||
660 | * Find the matching SFP ID in the EEPROM | ||
661 | * and program the init sequence | ||
662 | */ | ||
663 | hw->eeprom.ops.read(hw, *list_offset, &sfp_id); | ||
664 | |||
665 | while (sfp_id != IXGBE_PHY_INIT_END_NL) { | ||
666 | if (sfp_id == hw->phy.sfp_type) { | ||
667 | (*list_offset)++; | ||
668 | hw->eeprom.ops.read(hw, *list_offset, data_offset); | ||
669 | if ((!*data_offset) || (*data_offset == 0xFFFF)) { | ||
670 | hw_dbg(hw, "SFP+ module not supported\n"); | ||
671 | return IXGBE_ERR_SFP_NOT_SUPPORTED; | ||
672 | } else { | ||
673 | break; | ||
674 | } | ||
675 | } else { | ||
676 | (*list_offset) += 2; | ||
677 | if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) | ||
678 | return IXGBE_ERR_PHY; | ||
679 | } | ||
680 | } | ||
681 | |||
682 | if (sfp_id == IXGBE_PHY_INIT_END_NL) { | ||
683 | hw_dbg(hw, "No matching SFP+ module found\n"); | ||
684 | return IXGBE_ERR_SFP_NOT_SUPPORTED; | ||
685 | } | ||
686 | |||
687 | return 0; | ||
688 | } | ||
689 | |||
690 | /** | ||
691 | * ixgbe_check_phy_link_tnx - Determine link and speed status | ||
692 | * @hw: pointer to hardware structure | ||
693 | * | ||
694 | * Reads the VS1 register to determine if link is up and the current speed for | ||
695 | * the PHY. | ||
696 | **/ | ||
697 | s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, | ||
698 | bool *link_up) | ||
699 | { | ||
700 | s32 status = 0; | ||
701 | u32 time_out; | ||
702 | u32 max_time_out = 10; | ||
703 | u16 phy_link = 0; | ||
704 | u16 phy_speed = 0; | ||
705 | u16 phy_data = 0; | ||
706 | |||
707 | /* Initialize speed and link to default case */ | ||
708 | *link_up = false; | ||
709 | *speed = IXGBE_LINK_SPEED_10GB_FULL; | ||
710 | |||
711 | /* | ||
712 | * Check current speed and link status of the PHY register. | ||
713 | * This is a vendor specific register and may have to | ||
714 | * be changed for other copper PHYs. | ||
715 | */ | ||
716 | for (time_out = 0; time_out < max_time_out; time_out++) { | ||
717 | udelay(10); | ||
718 | status = hw->phy.ops.read_reg(hw, | ||
719 | IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, | ||
720 | IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, | ||
721 | &phy_data); | ||
722 | phy_link = phy_data & | ||
723 | IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; | ||
724 | phy_speed = phy_data & | ||
725 | IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; | ||
726 | if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { | ||
727 | *link_up = true; | ||
728 | if (phy_speed == | ||
729 | IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) | ||
730 | *speed = IXGBE_LINK_SPEED_1GB_FULL; | ||
731 | break; | ||
732 | } | ||
733 | } | ||
734 | |||
735 | return status; | ||
736 | } | ||
737 | |||
738 | /** | ||
739 | * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version | ||
740 | * @hw: pointer to hardware structure | ||
741 | * @firmware_version: pointer to the PHY Firmware Version | ||
742 | **/ | ||
743 | s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, | ||
744 | u16 *firmware_version) | ||
745 | { | ||
746 | s32 status = 0; | ||
747 | |||
748 | status = hw->phy.ops.read_reg(hw, TNX_FW_REV, | ||
749 | IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, | ||
750 | firmware_version); | ||
751 | |||
752 | return status; | ||
753 | } | ||
754 | |||
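ixgbe_reset_phy_nl() above drives the PHY from an init block stored in the NIC's EEPROM: each 16-bit word carries a 4-bit control code in its top nibble and 12 bits of data, giving DELAY, DATA and CONTROL (SOL/EOL) records, matching the IXGBE_*_NL masks added to ixgbe_type.h below. A small stand-alone decoder for a single word using those values:

    #include <stdio.h>
    #include <stdint.h>

    /* Values mirror the IXGBE_*_NL constants added to ixgbe_type.h below:
     * control code in bits 15:12 of each EEPROM word, data in bits 11:0. */
    #define CONTROL_MASK_NL  0xF000u
    #define DATA_MASK_NL     0x0FFFu
    #define CONTROL_SHIFT_NL 12
    #define DELAY_NL         0        /* data = delay in milliseconds      */
    #define DATA_NL          1        /* data = number of PHY writes       */
    #define CONTROL_NL       0x000F   /* data = 0x0FFF (EOL) or 0x0 (SOL)  */

    static void decode(uint16_t eword)
    {
        uint16_t control = (eword & CONTROL_MASK_NL) >> CONTROL_SHIFT_NL;
        uint16_t edata = eword & DATA_MASK_NL;

        switch (control) {
        case DELAY_NL:
            printf("DELAY %u ms\n", (unsigned)edata);
            break;
        case DATA_NL:
            printf("DATA block, %u PHY writes follow\n", (unsigned)edata);
            break;
        case CONTROL_NL:
            if (edata == 0x0FFF)
                printf("EOL\n");
            else if (edata == 0x0000)
                printf("SOL\n");
            else
                printf("bad control value\n");
            break;
        default:
            printf("bad control type %u\n", (unsigned)control);
            break;
        }
    }

    int main(void)
    {
        decode(0xF000);   /* SOL          */
        decode(0x0014);   /* DELAY 20 ms  */
        decode(0xFFFF);   /* EOL          */
        return 0;
    }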
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index 9bfe3f2b1d8f..43a97bc420f5 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h | |||
@@ -63,6 +63,18 @@ | |||
63 | #define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 | 63 | #define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 |
64 | #define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 | 64 | #define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 |
65 | 65 | ||
66 | /* I2C SDA and SCL timing parameters for standard mode */ | ||
67 | #define IXGBE_I2C_T_HD_STA 4 | ||
68 | #define IXGBE_I2C_T_LOW 5 | ||
69 | #define IXGBE_I2C_T_HIGH 4 | ||
70 | #define IXGBE_I2C_T_SU_STA 5 | ||
71 | #define IXGBE_I2C_T_HD_DATA 5 | ||
72 | #define IXGBE_I2C_T_SU_DATA 1 | ||
73 | #define IXGBE_I2C_T_RISE 1 | ||
74 | #define IXGBE_I2C_T_FALL 1 | ||
75 | #define IXGBE_I2C_T_SU_STO 4 | ||
76 | #define IXGBE_I2C_T_BUF 5 | ||
77 | |||
66 | 78 | ||
67 | s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); | 79 | s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); |
68 | s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); | 80 | s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); |
@@ -77,4 +89,17 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, | |||
77 | bool autoneg, | 89 | bool autoneg, |
78 | bool autoneg_wait_to_complete); | 90 | bool autoneg_wait_to_complete); |
79 | 91 | ||
92 | /* PHY specific */ | ||
93 | s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, | ||
94 | ixgbe_link_speed *speed, | ||
95 | bool *link_up); | ||
96 | s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, | ||
97 | u16 *firmware_version); | ||
98 | |||
99 | s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); | ||
100 | s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); | ||
101 | s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, | ||
102 | u16 *list_offset, | ||
103 | u16 *data_offset); | ||
104 | |||
80 | #endif /* _IXGBE_PHY_H_ */ | 105 | #endif /* _IXGBE_PHY_H_ */ |
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index c6f8fa1c4e59..83a11ff9ffd1 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -36,8 +36,12 @@ | |||
36 | /* Device IDs */ | 36 | /* Device IDs */ |
37 | #define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 | 37 | #define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 |
38 | #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 | 38 | #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 |
39 | #define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB | ||
40 | #define IXGBE_DEV_ID_82598AT 0x10C8 | ||
39 | #define IXGBE_DEV_ID_82598EB_CX4 0x10DD | 41 | #define IXGBE_DEV_ID_82598EB_CX4 0x10DD |
40 | #define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC | 42 | #define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC |
43 | #define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 | ||
44 | #define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 | ||
41 | #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 | 45 | #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 |
42 | 46 | ||
43 | /* General Registers */ | 47 | /* General Registers */ |
@@ -452,6 +456,7 @@ | |||
452 | #define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 | 456 | #define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 |
453 | #define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 | 457 | #define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 |
454 | #define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ | 458 | #define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ |
459 | #define IXGBE_TWINAX_DEV 1 | ||
455 | 460 | ||
456 | #define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ | 461 | #define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ |
457 | 462 | ||
@@ -487,12 +492,27 @@ | |||
487 | #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 | 492 | #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 |
488 | #define IXGBE_MAX_PHY_ADDR 32 | 493 | #define IXGBE_MAX_PHY_ADDR 32 |
489 | 494 | ||
490 | /* PHY IDs*/ | 495 | /* PHY IDs */ |
496 | #define TN1010_PHY_ID 0x00A19410 | ||
497 | #define TNX_FW_REV 0xB | ||
491 | #define QT2022_PHY_ID 0x0043A400 | 498 | #define QT2022_PHY_ID 0x0043A400 |
499 | #define ATH_PHY_ID 0x03429050 | ||
492 | 500 | ||
493 | /* PHY Types */ | 501 | /* PHY Types */ |
494 | #define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 | 502 | #define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 |
495 | 503 | ||
504 | /* Special PHY Init Routine */ | ||
505 | #define IXGBE_PHY_INIT_OFFSET_NL 0x002B | ||
506 | #define IXGBE_PHY_INIT_END_NL 0xFFFF | ||
507 | #define IXGBE_CONTROL_MASK_NL 0xF000 | ||
508 | #define IXGBE_DATA_MASK_NL 0x0FFF | ||
509 | #define IXGBE_CONTROL_SHIFT_NL 12 | ||
510 | #define IXGBE_DELAY_NL 0 | ||
511 | #define IXGBE_DATA_NL 1 | ||
512 | #define IXGBE_CONTROL_NL 0x000F | ||
513 | #define IXGBE_CONTROL_EOL_NL 0x0FFF | ||
514 | #define IXGBE_CONTROL_SOL_NL 0x0000 | ||
515 | |||
496 | /* General purpose Interrupt Enable */ | 516 | /* General purpose Interrupt Enable */ |
497 | #define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ | 517 | #define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ |
498 | #define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ | 518 | #define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ |
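Note: the *_NL defines describe the PHY init list stored in the EEPROM for the new ixgbe_phy_nl type: each 16-bit word carries a 4-bit opcode in the top nibble (delay, data block, or control) and a 12-bit operand below it, with SOL/EOL marking section boundaries. The actual parser is ixgbe_reset_phy_nl() in ixgbe_phy.c; the sketch below only shows the per-word decode, with the data-block handling elided:

/*
 * Sketch: decode one word of the EEPROM-resident "NL" PHY init list using
 * the new masks.  Logging is illustrative only.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include "ixgbe_type.h"

static void decode_nl_word(u16 eword)
{
	u16 control = (eword & IXGBE_CONTROL_MASK_NL) >> IXGBE_CONTROL_SHIFT_NL;
	u16 edata   = eword & IXGBE_DATA_MASK_NL;

	switch (control) {
	case IXGBE_DELAY_NL:
		/* operand is a delay the parser waits out */
		break;
	case IXGBE_DATA_NL:
		/* operand counts the PHY register data words that follow */
		break;
	case IXGBE_CONTROL_NL:
		if (edata == IXGBE_CONTROL_EOL_NL)
			pr_debug("end of init list\n");
		else if (edata == IXGBE_CONTROL_SOL_NL)
			pr_debug("start of a new section\n");
		break;
	default:
		/* unknown opcode: the real parser aborts the sequence */
		break;
	}
}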
@@ -1202,8 +1222,10 @@ enum ixgbe_mac_type { | |||
1202 | 1222 | ||
1203 | enum ixgbe_phy_type { | 1223 | enum ixgbe_phy_type { |
1204 | ixgbe_phy_unknown = 0, | 1224 | ixgbe_phy_unknown = 0, |
1225 | ixgbe_phy_tn, | ||
1205 | ixgbe_phy_qt, | 1226 | ixgbe_phy_qt, |
1206 | ixgbe_phy_xaui, | 1227 | ixgbe_phy_xaui, |
1228 | ixgbe_phy_nl, | ||
1207 | ixgbe_phy_tw_tyco, | 1229 | ixgbe_phy_tw_tyco, |
1208 | ixgbe_phy_tw_unknown, | 1230 | ixgbe_phy_tw_unknown, |
1209 | ixgbe_phy_sfp_avago, | 1231 | ixgbe_phy_sfp_avago, |
@@ -1225,6 +1247,7 @@ enum ixgbe_sfp_type { | |||
1225 | ixgbe_sfp_type_da_cu = 0, | 1247 | ixgbe_sfp_type_da_cu = 0, |
1226 | ixgbe_sfp_type_sr = 1, | 1248 | ixgbe_sfp_type_sr = 1, |
1227 | ixgbe_sfp_type_lr = 2, | 1249 | ixgbe_sfp_type_lr = 2, |
1250 | ixgbe_sfp_type_not_present = 0xFFFE, | ||
1228 | ixgbe_sfp_type_unknown = 0xFFFF | 1251 | ixgbe_sfp_type_unknown = 0xFFFF |
1229 | }; | 1252 | }; |
1230 | 1253 | ||
@@ -1396,6 +1419,8 @@ struct ixgbe_phy_operations { | |||
1396 | s32 (*setup_link)(struct ixgbe_hw *); | 1419 | s32 (*setup_link)(struct ixgbe_hw *); |
1397 | s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, | 1420 | s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, |
1398 | bool); | 1421 | bool); |
1422 | s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); | ||
1423 | s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); | ||
1399 | s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); | 1424 | s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); |
1400 | s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); | 1425 | s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); |
1401 | s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); | 1426 | s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); |
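Note: the two new function pointers let MAC-level code query PHY link state and firmware revision without knowing which PHY is fitted; for the TN1010 they would be wired to the ixgbe_check_phy_link_tnx() and ixgbe_get_phy_firmware_version_tnx() helpers declared in ixgbe_phy.h above. A minimal caller sketch, assuming the ops have been populated during PHY identification:

/*
 * Sketch: use the new PHY ops through the ops table rather than calling a
 * PHY-specific helper directly.
 */
#include <linux/types.h>
#include "ixgbe_type.h"

static s32 report_phy_state(struct ixgbe_hw *hw)
{
	ixgbe_link_speed speed;
	bool link_up;
	u16 fw_rev;
	s32 ret;

	if (!hw->phy.ops.check_link || !hw->phy.ops.get_firmware_version)
		return IXGBE_NOT_IMPLEMENTED;

	ret = hw->phy.ops.check_link(hw, &speed, &link_up);
	if (ret)
		return ret;

	return hw->phy.ops.get_firmware_version(hw, &fw_rev);
}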
@@ -1486,6 +1511,7 @@ struct ixgbe_info { | |||
1486 | #define IXGBE_ERR_PHY_ADDR_INVALID -17 | 1511 | #define IXGBE_ERR_PHY_ADDR_INVALID -17 |
1487 | #define IXGBE_ERR_I2C -18 | 1512 | #define IXGBE_ERR_I2C -18 |
1488 | #define IXGBE_ERR_SFP_NOT_SUPPORTED -19 | 1513 | #define IXGBE_ERR_SFP_NOT_SUPPORTED -19 |
1514 | #define IXGBE_ERR_SFP_NOT_PRESENT -20 | ||
1489 | #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF | 1515 | #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF |
1490 | 1516 | ||
1491 | #endif /* _IXGBE_TYPE_H_ */ | 1517 | #endif /* _IXGBE_TYPE_H_ */ |
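Note: together with ixgbe_sfp_type_not_present above, the new IXGBE_ERR_SFP_NOT_PRESENT return code lets callers tell an empty SFP cage apart from an unsupported module. A sketch of how probing code might branch on the result of ixgbe_identify_sfp_module_generic(); the policy in the comments (retry vs. fail) is illustrative, not taken from the driver:

/*
 * Sketch: distinguish "no module fitted" from "module not supported" when
 * identifying an SFP.
 */
#include <linux/types.h>
#include "ixgbe_type.h"
#include "ixgbe_phy.h"

static s32 probe_sfp(struct ixgbe_hw *hw)
{
	s32 ret = ixgbe_identify_sfp_module_generic(hw);

	switch (ret) {
	case 0:
		break;			/* module identified, carry on */
	case IXGBE_ERR_SFP_NOT_PRESENT:
		break;			/* cage empty: not fatal, check again later */
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		return ret;		/* unsupported module: fail the probe */
	default:
		return ret;
	}

	return 0;
}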