author     Linus Torvalds <torvalds@linux-foundation.org>  2011-05-20 16:43:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-05-20 16:43:21 -0400
commit     06f4e926d256d902dd9a53dcb400fd74974ce087 (patch)
tree       0b438b67f5f0eff6fd617bc497a9dace6164a488 /drivers/net/ixgbe
parent     8e7bfcbab3825d1b404d615cb1b54f44ff81f981 (diff)
parent     d93515611bbc70c2fe4db232e5feb448ed8e4cc9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1446 commits)
macvlan: fix panic if lowerdev in a bond
tg3: Add braces around 5906 workaround.
tg3: Fix NETIF_F_LOOPBACK error
macvlan: remove one synchronize_rcu() call
networking: NET_CLS_ROUTE4 depends on INET
irda: Fix error propagation in ircomm_lmp_connect_response()
irda: Kill set but unused variable 'bytes' in irlan_check_command_param()
irda: Kill set but unused variable 'clen' in ircomm_connect_indication()
rxrpc: Fix set but unused variable 'usage' in rxrpc_get_transport()
be2net: Kill set but unused variable 'req' in lancer_fw_download()
irda: Kill set but unused vars 'saddr' and 'daddr' in irlan_provider_connect_indication()
atl1c: atl1c_resume() is only used when CONFIG_PM_SLEEP is defined.
rxrpc: Fix set but unused variable 'usage' in rxrpc_get_peer().
rxrpc: Kill set but unused variable 'local' in rxrpc_UDP_error_handler()
rxrpc: Kill set but unused variable 'sp' in rxrpc_process_connection()
rxrpc: Kill set but unused variable 'sp' in rxrpc_rotate_tx_window()
pkt_sched: Kill set but unused variable 'protocol' in tc_classify()
isdn: capi: Use pr_debug() instead of ifdefs.
tg3: Update version to 3.119
tg3: Apply rx_discards fix to 5719/5720
...
Fix up trivial conflicts in arch/x86/Kconfig and net/mac80211/agg-tx.c
as per Davem.
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--   drivers/net/ixgbe/ixgbe.h             111
-rw-r--r--   drivers/net/ixgbe/ixgbe_82598.c        61
-rw-r--r--   drivers/net/ixgbe/ixgbe_82599.c       178
-rw-r--r--   drivers/net/ixgbe/ixgbe_common.c      532
-rw-r--r--   drivers/net/ixgbe/ixgbe_common.h       12
-rw-r--r--   drivers/net/ixgbe/ixgbe_dcb_82598.c     2
-rw-r--r--   drivers/net/ixgbe/ixgbe_dcb_82599.c    77
-rw-r--r--   drivers/net/ixgbe/ixgbe_dcb_82599.h     2
-rw-r--r--   drivers/net/ixgbe/ixgbe_dcb_nl.c      109
-rw-r--r--   drivers/net/ixgbe/ixgbe_ethtool.c     191
-rw-r--r--   drivers/net/ixgbe/ixgbe_fcoe.c          4
-rw-r--r--   drivers/net/ixgbe/ixgbe_main.c       1037
-rw-r--r--   drivers/net/ixgbe/ixgbe_mbx.h           4
-rw-r--r--   drivers/net/ixgbe/ixgbe_phy.c          20
-rw-r--r--   drivers/net/ixgbe/ixgbe_sriov.c       100
-rw-r--r--   drivers/net/ixgbe/ixgbe_type.h        218
-rw-r--r--   drivers/net/ixgbe/ixgbe_x540.c        317
17 files changed, 2024 insertions, 951 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 8d468028bb55..e467b20ed1f0 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -106,6 +106,7 @@
 #define IXGBE_MAX_VF_FUNCTIONS 64
 #define IXGBE_MAX_VFTA_ENTRIES 128
 #define MAX_EMULATION_MAC_ADDRS 16
+#define IXGBE_MAX_PF_MACVLANS 15
 #define VMDQ_P(p) ((p) + adapter->num_vfs)

 struct vf_data_storage {
@@ -121,6 +122,15 @@ struct vf_data_storage {
     u16 tx_rate;
 };

+struct vf_macvlans {
+    struct list_head l;
+    int vf;
+    int rar_entry;
+    bool free;
+    bool is_macvlan;
+    u8 vf_macvlan[ETH_ALEN];
+};
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbe_tx_buffer {
@@ -331,10 +341,52 @@ struct ixgbe_q_vector {

 /* board specific private data structure */
 struct ixgbe_adapter {
-    struct timer_list watchdog_timer;
+    unsigned long state;
+
+    /* Some features need tri-state capability,
+     * thus the additional *_CAPABLE flags.
+     */
+    u32 flags;
+#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
+#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
+#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
+#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
+#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
+#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
+#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
+#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
+#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
+#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
+#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
+#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
+#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
+#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
+#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
+#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 23)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 24)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 25)
+#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 26)
+#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 27)
+#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 28)
+#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 29)
+
+    u32 flags2;
+#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
+#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
+#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
+
     unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
     u16 bd_number;
-    struct work_struct reset_task;
     struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];

     /* DCB parameters */
@@ -377,43 +429,6 @@ struct ixgbe_adapter {
     u32 alloc_rx_page_failed;
     u32 alloc_rx_buff_failed;

-    /* Some features need tri-state capability,
-     * thus the additional *_CAPABLE flags.
-     */
-    u32 flags;
-#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
-#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
-#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
-#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
-#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
-#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
-#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
-#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
-#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
-#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
-#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
-#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 23)
-#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 24)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 25)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 26)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 27)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 28)
-#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 29)
-#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 30)
-
-    u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
-#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
     /* default to trying for four seconds */
 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)

@@ -434,7 +449,6 @@ struct ixgbe_adapter {
     u32 rx_eitr_param;
     u32 tx_eitr_param;

-    unsigned long state;
     u64 tx_busy;
     unsigned int tx_ring_count;
     unsigned int rx_ring_count;
@@ -443,15 +457,12 @@ struct ixgbe_adapter {
     bool link_up;
     unsigned long link_check_timeout;

-    struct work_struct watchdog_task;
-    struct work_struct sfp_task;
-    struct timer_list sfp_timer;
-    struct work_struct multispeed_fiber_task;
-    struct work_struct sfp_config_module_task;
+    struct work_struct service_task;
+    struct timer_list service_timer;
     u32 fdir_pballoc;
     u32 atr_sample_rate;
+    unsigned long fdir_overflow; /* number of times ATR was backed off */
     spinlock_t fdir_perfect_lock;
-    struct work_struct fdir_reinit_task;
 #ifdef IXGBE_FCOE
     struct ixgbe_fcoe fcoe;
 #endif /* IXGBE_FCOE */
@@ -461,7 +472,7 @@ struct ixgbe_adapter {
     u16 eeprom_version;

     int node;
-    struct work_struct check_overtemp_task;
+    u32 led_reg;
     u32 interrupt_event;
     char lsc_int_name[IFNAMSIZ + 9];

@@ -470,13 +481,17 @@ struct ixgbe_adapter {
     unsigned int num_vfs;
     struct vf_data_storage *vfinfo;
     int vf_rate_link_speed;
+    struct vf_macvlans vf_mvs;
+    struct vf_macvlans *mv_list;
+    bool antispoofing_enabled;
 };

 enum ixbge_state_t {
     __IXGBE_TESTING,
     __IXGBE_RESETTING,
     __IXGBE_DOWN,
-    __IXGBE_SFP_MODULE_NOT_FOUND
+    __IXGBE_SERVICE_SCHED,
+    __IXGBE_IN_SFP_INIT,
 };

 struct ixgbe_rsc_cb {
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 845c679c8b87..8179e5060a18 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -37,6 +37,7 @@
 #define IXGBE_82598_RAR_ENTRIES 16
 #define IXGBE_82598_MC_TBL_SIZE 128
 #define IXGBE_82598_VFT_TBL_SIZE 128
+#define IXGBE_82598_RX_PB_SIZE 512

 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                          ixgbe_link_speed speed,
@@ -197,14 +198,35 @@ out:
  * @hw: pointer to hardware structure
  *
  * Starts the hardware using the generic start_hw function.
- * Then set pcie completion timeout
+ * Disables relaxed ordering Then set pcie completion timeout
+ *
 **/
 static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
 {
+    u32 regval;
+    u32 i;
     s32 ret_val = 0;

     ret_val = ixgbe_start_hw_generic(hw);

+    /* Disable relaxed ordering */
+    for (i = 0; ((i < hw->mac.max_tx_queues) &&
+         (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+        regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+        regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+        IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+    }
+
+    for (i = 0; ((i < hw->mac.max_rx_queues) &&
+         (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+        regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+        regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+                    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+        IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+    }
+
+    hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
+
     /* set the completion timeout for interface */
     if (ret_val == 0)
         ixgbe_set_pcie_completion_timeout(hw);
@@ -1064,7 +1086,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
         sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
         if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
             break;
-        msleep(10);
+        usleep_range(10000, 20000);
     }

     if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
@@ -1188,6 +1210,38 @@ out:
     return physical_layer;
 }

+/**
+ * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
+ * port devices.
+ * @hw: pointer to the HW structure
+ *
+ * Calls common function and corrects issue with some single port devices
+ * that enable LAN1 but not LAN0.
+ **/
+static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
+{
+    struct ixgbe_bus_info *bus = &hw->bus;
+    u16 pci_gen = 0;
+    u16 pci_ctrl2 = 0;
+
+    ixgbe_set_lan_id_multi_port_pcie(hw);
+
+    /* check if LAN0 is disabled */
+    hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
+    if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
+
+        hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
+
+        /* if LAN0 is completely disabled force function to 0 */
+        if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
+            !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
+            !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
+
+            bus->func = 0;
+        }
+    }
+}
+
 static struct ixgbe_mac_operations mac_ops_82598 = {
     .init_hw = &ixgbe_init_hw_generic,
     .reset_hw = &ixgbe_reset_hw_82598,
@@ -1199,7 +1253,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
     .get_mac_addr = &ixgbe_get_mac_addr_generic,
     .stop_adapter = &ixgbe_stop_adapter_generic,
     .get_bus_info = &ixgbe_get_bus_info_generic,
-    .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
+    .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598,
     .read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
     .write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
     .setup_link = &ixgbe_setup_mac_link_82598,
@@ -1227,6 +1281,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
 static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
     .init_params = &ixgbe_init_eeprom_params_generic,
     .read = &ixgbe_read_eerd_generic,
+    .read_buffer = &ixgbe_read_eerd_buffer_generic,
     .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
     .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
     .update_checksum = &ixgbe_update_eeprom_checksum_generic,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 00aeba385a2f..8ee661245af3 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -38,6 +38,7 @@
 #define IXGBE_82599_RAR_ENTRIES 128
 #define IXGBE_82599_MC_TBL_SIZE 128
 #define IXGBE_82599_VFT_TBL_SIZE 128
+#define IXGBE_82599_RX_PB_SIZE 512

 static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
 static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
@@ -61,6 +62,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
                                          bool autoneg,
                                          bool autoneg_wait_to_complete);
 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);

 static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 {
@@ -86,7 +88,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
         if ((mac->ops.get_media_type(hw) ==
              ixgbe_media_type_backplane) &&
             (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
-             hw->phy.smart_speed == ixgbe_smart_speed_on))
+             hw->phy.smart_speed == ixgbe_smart_speed_on) &&
+             !ixgbe_verify_lesm_fw_enabled_82599(hw))
             mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
         else
             mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
@@ -107,7 +110,6 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)

     ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
                                                   &data_offset);
-
     if (ret_val != 0)
         goto setup_sfp_out;

@@ -127,9 +129,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
         }

         /* Release the semaphore */
-        ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
-        /* Delay obtaining semaphore again to allow FW access */
-        msleep(hw->eeprom.semaphore_delay);
+        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+        /*
+         * Delay obtaining semaphore again to allow FW access,
+         * semaphore_delay is in ms usleep_range needs us.
+         */
+        usleep_range(hw->eeprom.semaphore_delay * 1000,
+                     hw->eeprom.semaphore_delay * 2000);

         /* Now restart DSP by setting Restart_AN and clearing LMS */
         IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
@@ -138,7 +144,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)

         /* Wait for AN to leave state 0 */
         for (i = 0; i < 10; i++) {
-            msleep(4);
+            usleep_range(4000, 8000);
             reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
             if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
                 break;
@@ -353,6 +359,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
     case IXGBE_DEV_ID_82599_SFP:
     case IXGBE_DEV_ID_82599_SFP_FCOE:
     case IXGBE_DEV_ID_82599_SFP_EM:
+    case IXGBE_DEV_ID_82599_SFP_SF2:
         media_type = ixgbe_media_type_fiber;
         break;
     case IXGBE_DEV_ID_82599_CX4:
@@ -361,6 +368,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
     case IXGBE_DEV_ID_82599_T3_LOM:
         media_type = ixgbe_media_type_copper;
         break;
+    case IXGBE_DEV_ID_82599_LS:
+        media_type = ixgbe_media_type_fiber_lco;
+        break;
     default:
         media_type = ixgbe_media_type_unknown;
         break;
@@ -486,7 +496,7 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
  *
  * Set the link speed in the AUTOC register and restarts link.
 **/
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                                           ixgbe_link_speed speed,
                                           bool autoneg,
                                           bool autoneg_wait_to_complete)
@@ -1176,7 +1186,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
         if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
             IXGBE_FDIRCTRL_INIT_DONE)
             break;
-        msleep(1);
+        usleep_range(1000, 2000);
     }
     if (i >= IXGBE_FDIR_INIT_DONE_POLL)
         hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
@@ -1271,7 +1281,7 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
         if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
             IXGBE_FDIRCTRL_INIT_DONE)
             break;
-        msleep(1);
+        usleep_range(1000, 2000);
     }
     if (i >= IXGBE_FDIR_INIT_DONE_POLL)
         hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
@@ -1740,30 +1750,29 @@ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
- * Starts the hardware using the generic start_hw function.
- * Then performs device-specific:
- * Clears the rate limiter registers.
+ * Starts the hardware using the generic start_hw function
+ * and the generation start_hw function.
+ * Then performs revision-specific operations, if any.
 **/
 static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
 {
-    u32 q_num;
-    s32 ret_val;
+    s32 ret_val = 0;

     ret_val = ixgbe_start_hw_generic(hw);
+    if (ret_val != 0)
+        goto out;

-    /* Clear the rate limiters */
-    for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) {
-        IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, q_num);
-        IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
-    }
-    IXGBE_WRITE_FLUSH(hw);
+    ret_val = ixgbe_start_hw_gen2(hw);
+    if (ret_val != 0)
+        goto out;

     /* We need to run link autotry after the driver loads */
     hw->mac.autotry_restart = true;
+    hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;

     if (ret_val == 0)
         ret_val = ixgbe_verify_fw_version_82599(hw);
-
+out:
     return ret_val;
 }

@@ -1775,7 +1784,7 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
  * If PHY already detected, maintains current PHY type in hw struct,
  * otherwise executes the PHY detection routine.
 **/
-s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
 {
     s32 status = IXGBE_ERR_PHY_ADDR_INVALID;

@@ -1968,21 +1977,6 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
 }

 /**
- * ixgbe_get_device_caps_82599 - Get additional device capabilities
- * @hw: pointer to hardware structure
- * @device_caps: the EEPROM word with the extra device capabilities
- *
- * This function will read the EEPROM location for the device capabilities,
- * and return the word through device_caps.
- **/
-static s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
-{
-    hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
-
-    return 0;
-}
-
-/**
  * ixgbe_verify_fw_version_82599 - verify fw version for 82599
  * @hw: pointer to hardware structure
  *
@@ -2030,6 +2024,110 @@ fw_version_out:
     return status;
 }

+/**
+ * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if the LESM FW module is present and enabled. Otherwise
+ * returns false. Smart Speed must be disabled if LESM FW module is enabled.
+ **/
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+{
+    bool lesm_enabled = false;
+    u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
+    s32 status;
+
+    /* get the offset to the Firmware Module block */
+    status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+    if ((status != 0) ||
+        (fw_offset == 0) || (fw_offset == 0xFFFF))
+        goto out;
+
+    /* get the offset to the LESM Parameters block */
+    status = hw->eeprom.ops.read(hw, (fw_offset +
+                                 IXGBE_FW_LESM_PARAMETERS_PTR),
+                                 &fw_lesm_param_offset);
+
+    if ((status != 0) ||
+        (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
+        goto out;
+
+    /* get the lesm state word */
+    status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
+                                 IXGBE_FW_LESM_STATE_1),
+                                 &fw_lesm_state);
+
+    if ((status == 0) &&
+        (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
+        lesm_enabled = true;
+
+out:
+    return lesm_enabled;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Retrieves 16 bit word(s) read from EEPROM
+ **/
+static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+                                          u16 words, u16 *data)
+{
+    struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+    s32 ret_val = IXGBE_ERR_CONFIG;
+
+    /*
+     * If EEPROM is detected and can be addressed using 14 bits,
+     * use EERD otherwise use bit bang
+     */
+    if ((eeprom->type == ixgbe_eeprom_spi) &&
+        (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
+        ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
+                                                 data);
+    else
+        ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
+                                                            words,
+                                                            data);
+
+    return ret_val;
+}
+
+/**
+ * ixgbe_read_eeprom_82599 - Read EEPROM word using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM
+ **/
+static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+                                   u16 offset, u16 *data)
+{
+    struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+    s32 ret_val = IXGBE_ERR_CONFIG;
+
+    /*
+     * If EEPROM is detected and can be addressed using 14 bits,
+     * use EERD otherwise use bit bang
+     */
+    if ((eeprom->type == ixgbe_eeprom_spi) &&
+        (offset <= IXGBE_EERD_MAX_ADDR))
+        ret_val = ixgbe_read_eerd_generic(hw, offset, data);
+    else
+        ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
+
+    return ret_val;
+}
+
 static struct ixgbe_mac_operations mac_ops_82599 = {
     .init_hw = &ixgbe_init_hw_generic,
     .reset_hw = &ixgbe_reset_hw_82599,
@@ -2040,7 +2138,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
     .enable_rx_dma = &ixgbe_enable_rx_dma_82599,
     .get_mac_addr = &ixgbe_get_mac_addr_generic,
     .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
-    .get_device_caps = &ixgbe_get_device_caps_82599,
+    .get_device_caps = &ixgbe_get_device_caps_generic,
     .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
     .stop_adapter = &ixgbe_stop_adapter_generic,
     .get_bus_info = &ixgbe_get_bus_info_generic,
@@ -2076,8 +2174,10 @@ static struct ixgbe_mac_operations mac_ops_82599 = {

 static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
     .init_params = &ixgbe_init_eeprom_params_generic,
-    .read = &ixgbe_read_eerd_generic,
+    .read = &ixgbe_read_eeprom_82599,
+    .read_buffer = &ixgbe_read_eeprom_buffer_82599,
     .write = &ixgbe_write_eeprom_generic,
+    .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
     .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
     .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
     .update_checksum = &ixgbe_update_eeprom_checksum_generic,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index bcd952916eb2..b894b42a741c 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c | |||
@@ -54,6 +54,13 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); | |||
54 | static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, | 54 | static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, |
55 | u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); | 55 | u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); |
56 | static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); | 56 | static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); |
57 | static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); | ||
58 | static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, | ||
59 | u16 words, u16 *data); | ||
60 | static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, | ||
61 | u16 words, u16 *data); | ||
62 | static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, | ||
63 | u16 offset); | ||
57 | 64 | ||
58 | /** | 65 | /** |
59 | * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx | 66 | * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx |
@@ -96,6 +103,45 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) | |||
96 | } | 103 | } |
97 | 104 | ||
98 | /** | 105 | /** |
106 | * ixgbe_start_hw_gen2 - Init sequence for common device family | ||
107 | * @hw: pointer to hw structure | ||
108 | * | ||
109 | * Performs the init sequence common to the second generation | ||
110 | * of 10 GbE devices. | ||
111 | * Devices in the second generation: | ||
112 | * 82599 | ||
113 | * X540 | ||
114 | **/ | ||
115 | s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) | ||
116 | { | ||
117 | u32 i; | ||
118 | u32 regval; | ||
119 | |||
120 | /* Clear the rate limiters */ | ||
121 | for (i = 0; i < hw->mac.max_tx_queues; i++) { | ||
122 | IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); | ||
123 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); | ||
124 | } | ||
125 | IXGBE_WRITE_FLUSH(hw); | ||
126 | |||
127 | /* Disable relaxed ordering */ | ||
128 | for (i = 0; i < hw->mac.max_tx_queues; i++) { | ||
129 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); | ||
130 | regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; | ||
131 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); | ||
132 | } | ||
133 | |||
134 | for (i = 0; i < hw->mac.max_rx_queues; i++) { | ||
135 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); | ||
136 | regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | | ||
137 | IXGBE_DCA_RXCTRL_DESC_HSRO_EN); | ||
138 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); | ||
139 | } | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | /** | ||
99 | * ixgbe_init_hw_generic - Generic hardware initialization | 145 | * ixgbe_init_hw_generic - Generic hardware initialization |
100 | * @hw: pointer to hardware structure | 146 | * @hw: pointer to hardware structure |
101 | * | 147 | * |
@@ -464,7 +510,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) | |||
464 | reg_val &= ~(IXGBE_RXCTRL_RXEN); | 510 | reg_val &= ~(IXGBE_RXCTRL_RXEN); |
465 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); | 511 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); |
466 | IXGBE_WRITE_FLUSH(hw); | 512 | IXGBE_WRITE_FLUSH(hw); |
467 | msleep(2); | 513 | usleep_range(2000, 4000); |
468 | 514 | ||
469 | /* Clear interrupt mask to stop from interrupts being generated */ | 515 | /* Clear interrupt mask to stop from interrupts being generated */ |
470 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); | 516 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); |
@@ -545,6 +591,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) | |||
545 | /* Set default semaphore delay to 10ms which is a well | 591 | /* Set default semaphore delay to 10ms which is a well |
546 | * tested value */ | 592 | * tested value */ |
547 | eeprom->semaphore_delay = 10; | 593 | eeprom->semaphore_delay = 10; |
594 | /* Clear EEPROM page size, it will be initialized as needed */ | ||
595 | eeprom->word_page_size = 0; | ||
548 | 596 | ||
549 | /* | 597 | /* |
550 | * Check for EEPROM present first. | 598 | * Check for EEPROM present first. |
@@ -577,26 +625,78 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) | |||
577 | } | 625 | } |
578 | 626 | ||
579 | /** | 627 | /** |
580 | * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM | 628 | * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang |
581 | * @hw: pointer to hardware structure | 629 | * @hw: pointer to hardware structure |
582 | * @offset: offset within the EEPROM to be written to | 630 | * @offset: offset within the EEPROM to write |
583 | * @data: 16 bit word to be written to the EEPROM | 631 | * @words: number of words |
632 | * @data: 16 bit word(s) to write to EEPROM | ||
584 | * | 633 | * |
585 | * If ixgbe_eeprom_update_checksum is not called after this function, the | 634 | * Reads 16 bit word(s) from EEPROM through bit-bang method |
586 | * EEPROM will most likely contain an invalid checksum. | ||
587 | **/ | 635 | **/ |
588 | s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) | 636 | s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, |
637 | u16 words, u16 *data) | ||
589 | { | 638 | { |
590 | s32 status; | 639 | s32 status = 0; |
591 | u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; | 640 | u16 i, count; |
592 | 641 | ||
593 | hw->eeprom.ops.init_params(hw); | 642 | hw->eeprom.ops.init_params(hw); |
594 | 643 | ||
595 | if (offset >= hw->eeprom.word_size) { | 644 | if (words == 0) { |
645 | status = IXGBE_ERR_INVALID_ARGUMENT; | ||
646 | goto out; | ||
647 | } | ||
648 | |||
649 | if (offset + words > hw->eeprom.word_size) { | ||
596 | status = IXGBE_ERR_EEPROM; | 650 | status = IXGBE_ERR_EEPROM; |
597 | goto out; | 651 | goto out; |
598 | } | 652 | } |
599 | 653 | ||
654 | /* | ||
655 | * The EEPROM page size cannot be queried from the chip. We do lazy | ||
656 | * initialization. It is worth to do that when we write large buffer. | ||
657 | */ | ||
658 | if ((hw->eeprom.word_page_size == 0) && | ||
659 | (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) | ||
660 | ixgbe_detect_eeprom_page_size_generic(hw, offset); | ||
661 | |||
662 | /* | ||
663 | * We cannot hold synchronization semaphores for too long | ||
664 | * to avoid other entity starvation. However it is more efficient | ||
665 | * to read in bursts than synchronizing access for each word. | ||
666 | */ | ||
667 | for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { | ||
668 | count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? | ||
669 | IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); | ||
670 | status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, | ||
671 | count, &data[i]); | ||
672 | |||
673 | if (status != 0) | ||
674 | break; | ||
675 | } | ||
676 | |||
677 | out: | ||
678 | return status; | ||
679 | } | ||
680 | |||
681 | /** | ||
682 | * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM | ||
683 | * @hw: pointer to hardware structure | ||
684 | * @offset: offset within the EEPROM to be written to | ||
685 | * @words: number of word(s) | ||
686 | * @data: 16 bit word(s) to be written to the EEPROM | ||
687 | * | ||
688 | * If ixgbe_eeprom_update_checksum is not called after this function, the | ||
689 | * EEPROM will most likely contain an invalid checksum. | ||
690 | **/ | ||
691 | static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, | ||
692 | u16 words, u16 *data) | ||
693 | { | ||
694 | s32 status; | ||
695 | u16 word; | ||
696 | u16 page_size; | ||
697 | u16 i; | ||
698 | u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; | ||
699 | |||
600 | /* Prepare the EEPROM for writing */ | 700 | /* Prepare the EEPROM for writing */ |
601 | status = ixgbe_acquire_eeprom(hw); | 701 | status = ixgbe_acquire_eeprom(hw); |
602 | 702 | ||
@@ -608,62 +708,147 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) | |||
608 | } | 708 | } |
609 | 709 | ||
610 | if (status == 0) { | 710 | if (status == 0) { |
611 | ixgbe_standby_eeprom(hw); | 711 | for (i = 0; i < words; i++) { |
712 | ixgbe_standby_eeprom(hw); | ||
612 | 713 | ||
613 | /* Send the WRITE ENABLE command (8 bit opcode ) */ | 714 | /* Send the WRITE ENABLE command (8 bit opcode ) */ |
614 | ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI, | 715 | ixgbe_shift_out_eeprom_bits(hw, |
615 | IXGBE_EEPROM_OPCODE_BITS); | 716 | IXGBE_EEPROM_WREN_OPCODE_SPI, |
717 | IXGBE_EEPROM_OPCODE_BITS); | ||
616 | 718 | ||
617 | ixgbe_standby_eeprom(hw); | 719 | ixgbe_standby_eeprom(hw); |
618 | 720 | ||
619 | /* | 721 | /* |
620 | * Some SPI eeproms use the 8th address bit embedded in the | 722 | * Some SPI eeproms use the 8th address bit embedded |
621 | * opcode | 723 | * in the opcode |
622 | */ | 724 | */ |
623 | if ((hw->eeprom.address_bits == 8) && (offset >= 128)) | 725 | if ((hw->eeprom.address_bits == 8) && |
624 | write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; | 726 | ((offset + i) >= 128)) |
727 | write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; | ||
728 | |||
729 | /* Send the Write command (8-bit opcode + addr) */ | ||
730 | ixgbe_shift_out_eeprom_bits(hw, write_opcode, | ||
731 | IXGBE_EEPROM_OPCODE_BITS); | ||
732 | ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), | ||
733 | hw->eeprom.address_bits); | ||
734 | |||
735 | page_size = hw->eeprom.word_page_size; | ||
736 | |||
737 | /* Send the data in burst via SPI*/ | ||
738 | do { | ||
739 | word = data[i]; | ||
740 | word = (word >> 8) | (word << 8); | ||
741 | ixgbe_shift_out_eeprom_bits(hw, word, 16); | ||
742 | |||
743 | if (page_size == 0) | ||
744 | break; | ||
745 | |||
746 | /* do not wrap around page */ | ||
747 | if (((offset + i) & (page_size - 1)) == | ||
748 | (page_size - 1)) | ||
749 | break; | ||
750 | } while (++i < words); | ||
751 | |||
752 | ixgbe_standby_eeprom(hw); | ||
753 | usleep_range(10000, 20000); | ||
754 | } | ||
755 | /* Done with writing - release the EEPROM */ | ||
756 | ixgbe_release_eeprom(hw); | ||
757 | } | ||
625 | 758 | ||
626 | /* Send the Write command (8-bit opcode + addr) */ | 759 | return status; |
627 | ixgbe_shift_out_eeprom_bits(hw, write_opcode, | 760 | } |
628 | IXGBE_EEPROM_OPCODE_BITS); | ||
629 | ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2), | ||
630 | hw->eeprom.address_bits); | ||
631 | 761 | ||
632 | /* Send the data */ | 762 | /** |
633 | data = (data >> 8) | (data << 8); | 763 | * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM |
634 | ixgbe_shift_out_eeprom_bits(hw, data, 16); | 764 | * @hw: pointer to hardware structure |
635 | ixgbe_standby_eeprom(hw); | 765 | * @offset: offset within the EEPROM to be written to |
766 | * @data: 16 bit word to be written to the EEPROM | ||
767 | * | ||
768 | * If ixgbe_eeprom_update_checksum is not called after this function, the | ||
769 | * EEPROM will most likely contain an invalid checksum. | ||
770 | **/ | ||
771 | s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) | ||
772 | { | ||
773 | s32 status; | ||
636 | 774 | ||
637 | /* Done with writing - release the EEPROM */ | 775 | hw->eeprom.ops.init_params(hw); |
638 | ixgbe_release_eeprom(hw); | 776 | |
777 | if (offset >= hw->eeprom.word_size) { | ||
778 | status = IXGBE_ERR_EEPROM; | ||
779 | goto out; | ||
639 | } | 780 | } |
640 | 781 | ||
782 | status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); | ||
783 | |||
641 | out: | 784 | out: |
642 | return status; | 785 | return status; |
643 | } | 786 | } |
644 | 787 | ||
645 | /** | 788 | /** |
646 | * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang | 789 | * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang |
647 | * @hw: pointer to hardware structure | 790 | * @hw: pointer to hardware structure |
648 | * @offset: offset within the EEPROM to be read | 791 | * @offset: offset within the EEPROM to be read |
649 | * @data: read 16 bit value from EEPROM | 792 | * @words: number of word(s) |
793 | * @data: read 16 bit words(s) from EEPROM | ||
650 | * | 794 | * |
651 | * Reads 16 bit value from EEPROM through bit-bang method | 795 | * Reads 16 bit word(s) from EEPROM through bit-bang method |
652 | **/ | 796 | **/ |
653 | s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, | 797 | s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, |
654 | u16 *data) | 798 | u16 words, u16 *data) |
655 | { | 799 | { |
656 | s32 status; | 800 | s32 status = 0; |
657 | u16 word_in; | 801 | u16 i, count; |
658 | u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; | ||
659 | 802 | ||
660 | hw->eeprom.ops.init_params(hw); | 803 | hw->eeprom.ops.init_params(hw); |
661 | 804 | ||
662 | if (offset >= hw->eeprom.word_size) { | 805 | if (words == 0) { |
806 | status = IXGBE_ERR_INVALID_ARGUMENT; | ||
807 | goto out; | ||
808 | } | ||
809 | |||
810 | if (offset + words > hw->eeprom.word_size) { | ||
663 | status = IXGBE_ERR_EEPROM; | 811 | status = IXGBE_ERR_EEPROM; |
664 | goto out; | 812 | goto out; |
665 | } | 813 | } |
666 | 814 | ||
815 | /* | ||
816 | * We cannot hold synchronization semaphores for too long | ||
817 | * to avoid other entity starvation. However it is more efficient | ||
818 | * to read in bursts than synchronizing access for each word. | ||
819 | */ | ||
820 | for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { | ||
821 | count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? | ||
822 | IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); | ||
823 | |||
824 | status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, | ||
825 | count, &data[i]); | ||
826 | |||
827 | if (status != 0) | ||
828 | break; | ||
829 | } | ||
830 | |||
831 | out: | ||
832 | return status; | ||
833 | } | ||
834 | |||
835 | /** | ||
836 | * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang | ||
837 | * @hw: pointer to hardware structure | ||
838 | * @offset: offset within the EEPROM to be read | ||
839 | * @words: number of word(s) | ||
840 | * @data: read 16 bit word(s) from EEPROM | ||
841 | * | ||
842 | * Reads 16 bit word(s) from EEPROM through bit-bang method | ||
843 | **/ | ||
844 | static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, | ||
845 | u16 words, u16 *data) | ||
846 | { | ||
847 | s32 status; | ||
848 | u16 word_in; | ||
849 | u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; | ||
850 | u16 i; | ||
851 | |||
667 | /* Prepare the EEPROM for reading */ | 852 | /* Prepare the EEPROM for reading */ |
668 | status = ixgbe_acquire_eeprom(hw); | 853 | status = ixgbe_acquire_eeprom(hw); |
669 | 854 | ||
@@ -675,29 +860,145 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, | |||
675 | } | 860 | } |
676 | 861 | ||
677 | if (status == 0) { | 862 | if (status == 0) { |
678 | ixgbe_standby_eeprom(hw); | 863 | for (i = 0; i < words; i++) { |
864 | ixgbe_standby_eeprom(hw); | ||
865 | /* | ||
866 | * Some SPI eeproms use the 8th address bit embedded | ||
867 | * in the opcode | ||
868 | */ | ||
869 | if ((hw->eeprom.address_bits == 8) && | ||
870 | ((offset + i) >= 128)) | ||
871 | read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; | ||
872 | |||
873 | /* Send the READ command (opcode + addr) */ | ||
874 | ixgbe_shift_out_eeprom_bits(hw, read_opcode, | ||
875 | IXGBE_EEPROM_OPCODE_BITS); | ||
876 | ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), | ||
877 | hw->eeprom.address_bits); | ||
878 | |||
879 | /* Read the data. */ | ||
880 | word_in = ixgbe_shift_in_eeprom_bits(hw, 16); | ||
881 | data[i] = (word_in >> 8) | (word_in << 8); | ||
882 | } | ||
679 | 883 | ||
680 | /* | 884 | /* End this read operation */ |
681 | * Some SPI eeproms use the 8th address bit embedded in the | 885 | ixgbe_release_eeprom(hw); |
682 | * opcode | 886 | } |
683 | */ | ||
684 | if ((hw->eeprom.address_bits == 8) && (offset >= 128)) | ||
685 | read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; | ||
686 | 887 | ||
687 | /* Send the READ command (opcode + addr) */ | 888 | return status; |
688 | ixgbe_shift_out_eeprom_bits(hw, read_opcode, | 889 | } |
689 | IXGBE_EEPROM_OPCODE_BITS); | 890 | |
690 | ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2), | 891 | /** |
691 | hw->eeprom.address_bits); | 892 | * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang |
893 | * @hw: pointer to hardware structure | ||
894 | * @offset: offset within the EEPROM to be read | ||
895 | * @data: read 16 bit value from EEPROM | ||
896 | * | ||
897 | * Reads 16 bit value from EEPROM through bit-bang method | ||
898 | **/ | ||
899 | s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, | ||
900 | u16 *data) | ||
901 | { | ||
902 | s32 status; | ||
692 | 903 | ||
693 | /* Read the data. */ | 904 | hw->eeprom.ops.init_params(hw); |
694 | word_in = ixgbe_shift_in_eeprom_bits(hw, 16); | ||
695 | *data = (word_in >> 8) | (word_in << 8); | ||
696 | 905 | ||
697 | /* End this read operation */ | 906 | if (offset >= hw->eeprom.word_size) { |
698 | ixgbe_release_eeprom(hw); | 907 | status = IXGBE_ERR_EEPROM; |
908 | goto out; | ||
909 | } | ||
910 | |||
911 | status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); | ||
912 | |||
913 | out: | ||
914 | return status; | ||
915 | } | ||
916 | |||
917 | /** | ||
918 | * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD | ||
919 | * @hw: pointer to hardware structure | ||
920 | * @offset: offset of word in the EEPROM to read | ||
921 | * @words: number of word(s) | ||
922 | * @data: 16 bit word(s) from the EEPROM | ||
923 | * | ||
924 | * Reads a 16 bit word(s) from the EEPROM using the EERD register. | ||
925 | **/ | ||
926 | s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, | ||
927 | u16 words, u16 *data) | ||
928 | { | ||
929 | u32 eerd; | ||
930 | s32 status = 0; | ||
931 | u32 i; | ||
932 | |||
933 | hw->eeprom.ops.init_params(hw); | ||
934 | |||
935 | if (words == 0) { | ||
936 | status = IXGBE_ERR_INVALID_ARGUMENT; | ||
937 | goto out; | ||
938 | } | ||
939 | |||
940 | if (offset >= hw->eeprom.word_size) { | ||
941 | status = IXGBE_ERR_EEPROM; | ||
942 | goto out; | ||
699 | } | 943 | } |
700 | 944 | ||
945 | for (i = 0; i < words; i++) { | ||
946 | eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) + | ||
947 | IXGBE_EEPROM_RW_REG_START; | ||
948 | |||
949 | IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); | ||
950 | status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); | ||
951 | |||
952 | if (status == 0) { | ||
953 | data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> | ||
954 | IXGBE_EEPROM_RW_REG_DATA); | ||
955 | } else { | ||
956 | hw_dbg(hw, "Eeprom read timed out\n"); | ||
957 | goto out; | ||
958 | } | ||
959 | } | ||
960 | out: | ||
961 | return status; | ||
962 | } | ||
963 | |||
964 | /** | ||
965 | * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size | ||
966 | * @hw: pointer to hardware structure | ||
967 | * @offset: offset within the EEPROM to be used as a scratch pad | ||
968 | * | ||
969 | * Discover EEPROM page size by writing marching data at given offset. | ||
970 | * This function is called only when we are writing a new large buffer | ||
971 | * at given offset so the data would be overwritten anyway. | ||
972 | **/ | ||
973 | static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, | ||
974 | u16 offset) | ||
975 | { | ||
976 | u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; | ||
977 | s32 status = 0; | ||
978 | u16 i; | ||
979 | |||
980 | for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) | ||
981 | data[i] = i; | ||
982 | |||
983 | hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; | ||
984 | status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, | ||
985 | IXGBE_EEPROM_PAGE_SIZE_MAX, data); | ||
986 | hw->eeprom.word_page_size = 0; | ||
987 | if (status != 0) | ||
988 | goto out; | ||
989 | |||
990 | status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); | ||
991 | if (status != 0) | ||
992 | goto out; | ||
993 | |||
994 | /* | ||
995 | * When writing in burst more than the actual page size | ||
996 | * EEPROM address wraps around current page. | ||
997 | */ | ||
998 | hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; | ||
999 | |||
1000 | hw_dbg(hw, "Detected EEPROM page size = %d words.", | ||
1001 | hw->eeprom.word_page_size); | ||
701 | out: | 1002 | out: |
702 | return status; | 1003 | return status; |
703 | } | 1004 | } |
@@ -712,33 +1013,75 @@ out: | |||
712 | **/ | 1013 | **/ |
713 | s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) | 1014 | s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) |
714 | { | 1015 | { |
715 | u32 eerd; | 1016 | return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); |
716 | s32 status; | 1017 | } |
1018 | |||
1019 | /** | ||
1020 | * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR | ||
1021 | * @hw: pointer to hardware structure | ||
1022 | * @offset: offset of word in the EEPROM to write | ||
1023 | * @words: number of words | ||
1024 | * @data: word(s) write to the EEPROM | ||
1025 | * | ||
1026 | * Write a 16 bit word(s) to the EEPROM using the EEWR register. | ||
1027 | **/ | ||
1028 | s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, | ||
1029 | u16 words, u16 *data) | ||
1030 | { | ||
1031 | u32 eewr; | ||
1032 | s32 status = 0; | ||
1033 | u16 i; | ||
717 | 1034 | ||
718 | hw->eeprom.ops.init_params(hw); | 1035 | hw->eeprom.ops.init_params(hw); |
719 | 1036 | ||
1037 | if (words == 0) { | ||
1038 | status = IXGBE_ERR_INVALID_ARGUMENT; | ||
1039 | goto out; | ||
1040 | } | ||
1041 | |||
720 | if (offset >= hw->eeprom.word_size) { | 1042 | if (offset >= hw->eeprom.word_size) { |
721 | status = IXGBE_ERR_EEPROM; | 1043 | status = IXGBE_ERR_EEPROM; |
722 | goto out; | 1044 | goto out; |
723 | } | 1045 | } |
724 | 1046 | ||
725 | eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) + | 1047 | for (i = 0; i < words; i++) { |
726 | IXGBE_EEPROM_RW_REG_START; | 1048 | eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | |
1049 | (data[i] << IXGBE_EEPROM_RW_REG_DATA) | | ||
1050 | IXGBE_EEPROM_RW_REG_START; | ||
727 | 1051 | ||
728 | IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); | 1052 | status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); |
729 | status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); | 1053 | if (status != 0) { |
1054 | hw_dbg(hw, "Eeprom write EEWR timed out\n"); | ||
1055 | goto out; | ||
1056 | } | ||
730 | 1057 | ||
731 | if (status == 0) | 1058 | IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); |
732 | *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> | 1059 | |
733 | IXGBE_EEPROM_RW_REG_DATA); | 1060 | status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); |
734 | else | 1061 | if (status != 0) { |
735 | hw_dbg(hw, "Eeprom read timed out\n"); | 1062 | hw_dbg(hw, "Eeprom write EEWR timed out\n"); |
1063 | goto out; | ||
1064 | } | ||
1065 | } | ||
736 | 1066 | ||
737 | out: | 1067 | out: |
738 | return status; | 1068 | return status; |
739 | } | 1069 | } |
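The write path mirrors the read path but polls twice per word: once before loading EEWR, so a previous operation still in flight is not clobbered, and once after, to confirm the word committed before moving on. A caller sketch, with the offset and payload illustrative only:

    u16 buf[4] = { 0x1234, 0x5678, 0x9abc, 0xdef0 };
    s32 err;

    /* write 4 consecutive EEPROM words starting at word 0x20 */
    err = ixgbe_write_eewr_buffer_generic(hw, 0x20, 4, buf);
    if (err)
            hw_dbg(hw, "EEWR buffer write failed: %d\n", err);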
740 | 1070 | ||
741 | /** | 1071 | /** |
1072 | * ixgbe_write_eewr_generic - Write EEPROM word using EEWR | ||
1073 | * @hw: pointer to hardware structure | ||
1074 | * @offset: offset of word in the EEPROM to write | ||
1075 | * @data: word write to the EEPROM | ||
1076 | * | ||
1077 | * Write a 16 bit word to the EEPROM using the EEWR register. | ||
1078 | **/ | ||
1079 | s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) | ||
1080 | { | ||
1081 | return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); | ||
1082 | } | ||
1083 | |||
1084 | /** | ||
742 | * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status | 1085 | * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status |
743 | * @hw: pointer to hardware structure | 1086 | * @hw: pointer to hardware structure |
744 | * @ee_reg: EEPROM flag for polling | 1087 | * @ee_reg: EEPROM flag for polling |
@@ -746,7 +1089,7 @@ out: | |||
746 | * Polls the status bit (bit 1) of the EERD or EEWR to determine when the | 1089 | * Polls the status bit (bit 1) of the EERD or EEWR to determine when the |
747 | * read or write is done respectively. | 1090 | * read or write is done respectively. |
748 | **/ | 1091 | **/ |
749 | s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) | 1092 | static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) |
750 | { | 1093 | { |
751 | u32 i; | 1094 | u32 i; |
752 | u32 reg; | 1095 | u32 reg; |
@@ -846,6 +1189,28 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) | |||
846 | udelay(50); | 1189 | udelay(50); |
847 | } | 1190 | } |
848 | 1191 | ||
1192 | if (i == timeout) { | ||
1193 | hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore " | ||
1194 | "not granted.\n"); | ||
1195 | /* | ||
1196 | * this release is particularly important because our attempts | ||
1197 | * above to get the semaphore may have succeeded, and if there | ||
1198 | * was a timeout, we should unconditionally clear the semaphore | ||
1199 | * bits to free the driver to make progress | ||
1200 | */ | ||
1201 | ixgbe_release_eeprom_semaphore(hw); | ||
1202 | |||
1203 | udelay(50); | ||
1204 | /* | ||
1205 | * one last try | ||
1206 | * If the SMBI bit is 0 when we read it, then the bit will be | ||
1207 | * set and we have the semaphore | ||
1208 | */ | ||
1209 | swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); | ||
1210 | if (!(swsm & IXGBE_SWSM_SMBI)) | ||
1211 | status = 0; | ||
1212 | } | ||
1213 | |||
849 | /* Now get the semaphore between SW/FW through the SWESMBI bit */ | 1214 | /* Now get the semaphore between SW/FW through the SWESMBI bit */ |
850 | if (status == 0) { | 1215 | if (status == 0) { |
851 | for (i = 0; i < timeout; i++) { | 1216 | for (i = 0; i < timeout; i++) { |
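The added timeout branch works because SWSM.SMBI is read-to-set hardware: reading the register while the bit is clear returns 0 and simultaneously sets the bit for the reader. Force-releasing a stale semaphore and then doing one more plain read is therefore a complete recovery path; a condensed shape of that last attempt, using the names from the hunk above:

    /* a stale holder may never clear SMBI, so drop it ourselves */
    ixgbe_release_eeprom_semaphore(hw);
    udelay(50);

    /* reading SWSM with SMBI clear atomically grants it to us */
    swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
    if (!(swsm & IXGBE_SWSM_SMBI))
            status = 0;     /* semaphore acquired after all */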
@@ -1112,8 +1477,12 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw) | |||
1112 | 1477 | ||
1113 | hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); | 1478 | hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); |
1114 | 1479 | ||
1115 | /* Delay before attempt to obtain semaphore again to allow FW access */ | 1480 | /* |
1116 | msleep(hw->eeprom.semaphore_delay); | 1481 | * Delay before attempt to obtain semaphore again to allow FW |
1482 | * access. semaphore_delay is in ms we need us for usleep_range | ||
1483 | */ | ||
1484 | usleep_range(hw->eeprom.semaphore_delay * 1000, | ||
1485 | hw->eeprom.semaphore_delay * 2000); | ||
1117 | } | 1486 | } |
1118 | 1487 | ||
1119 | /** | 1488 | /** |
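This is the first of many msleep() to usleep_range() conversions in the series. msleep() rounds up to jiffies, so a nominally short delay can overshoot by a full tick (10 ms at HZ=100), while usleep_range() is hrtimer-based and its min/max window lets the scheduler coalesce the wakeup with others. The conversion pattern used throughout is simply the old delay as the lower bound and twice it as the upper bound:

    /* before: at least semaphore_delay ms, often a jiffy more */
    msleep(hw->eeprom.semaphore_delay);

    /* after: semaphore_delay is in ms, usleep_range() takes us */
    usleep_range(hw->eeprom.semaphore_delay * 1000,
                 hw->eeprom.semaphore_delay * 2000);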
@@ -2189,7 +2558,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) | |||
2189 | * thread currently using resource (swmask) | 2558 | * thread currently using resource (swmask) |
2190 | */ | 2559 | */ |
2191 | ixgbe_release_eeprom_semaphore(hw); | 2560 | ixgbe_release_eeprom_semaphore(hw); |
2192 | msleep(5); | 2561 | usleep_range(5000, 10000); |
2193 | timeout--; | 2562 | timeout--; |
2194 | } | 2563 | } |
2195 | 2564 | ||
@@ -2263,7 +2632,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) | |||
2263 | autoc_reg |= IXGBE_AUTOC_AN_RESTART; | 2632 | autoc_reg |= IXGBE_AUTOC_AN_RESTART; |
2264 | autoc_reg |= IXGBE_AUTOC_FLU; | 2633 | autoc_reg |= IXGBE_AUTOC_FLU; |
2265 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); | 2634 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); |
2266 | msleep(10); | 2635 | usleep_range(10000, 20000); |
2267 | } | 2636 | } |
2268 | 2637 | ||
2269 | led_reg &= ~IXGBE_LED_MODE_MASK(index); | 2638 | led_reg &= ~IXGBE_LED_MODE_MASK(index); |
@@ -2883,3 +3252,18 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) | |||
2883 | pfvfspoof &= ~(1 << vf_target_shift); | 3252 | pfvfspoof &= ~(1 << vf_target_shift); |
2884 | IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); | 3253 | IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); |
2885 | } | 3254 | } |
3255 | |||
3256 | /** | ||
3257 | * ixgbe_get_device_caps_generic - Get additional device capabilities | ||
3258 | * @hw: pointer to hardware structure | ||
3259 | * @device_caps: the EEPROM word with the extra device capabilities | ||
3260 | * | ||
3261 | * This function will read the EEPROM location for the device capabilities, | ||
3262 | * and return the word through device_caps. | ||
3263 | **/ | ||
3264 | s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) | ||
3265 | { | ||
3266 | hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); | ||
3267 | |||
3268 | return 0; | ||
3269 | } | ||
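The new capabilities helper is a thin wrapper over a single EEPROM word read. A caller sketch, with the debug print added only for illustration:

    u16 device_caps;

    /* fetch the extra device-capabilities word from the NVM */
    ixgbe_get_device_caps_generic(hw, &device_caps);
    hw_dbg(hw, "device caps word: 0x%04x\n", device_caps);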
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 508f635fc2ca..46be83cfb500 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h | |||
@@ -35,6 +35,7 @@ u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); | |||
35 | s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); | 35 | s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); |
36 | s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); | 36 | s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); |
37 | s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); | 37 | s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); |
38 | s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw); | ||
38 | s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); | 39 | s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); |
39 | s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, | 40 | s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, |
40 | u32 pba_num_size); | 41 | u32 pba_num_size); |
@@ -48,14 +49,22 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); | |||
48 | 49 | ||
49 | s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); | 50 | s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); |
50 | s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); | 51 | s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); |
52 | s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, | ||
53 | u16 words, u16 *data); | ||
51 | s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); | 54 | s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); |
55 | s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, | ||
56 | u16 words, u16 *data); | ||
57 | s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data); | ||
58 | s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, | ||
59 | u16 words, u16 *data); | ||
52 | s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, | 60 | s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, |
53 | u16 *data); | 61 | u16 *data); |
62 | s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, | ||
63 | u16 words, u16 *data); | ||
54 | u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); | 64 | u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); |
55 | s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, | 65 | s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, |
56 | u16 *checksum_val); | 66 | u16 *checksum_val); |
57 | s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); | 67 | s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); |
58 | s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); | ||
59 | 68 | ||
60 | s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, | 69 | s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, |
61 | u32 enable_addr); | 70 | u32 enable_addr); |
@@ -89,6 +98,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); | |||
89 | s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); | 98 | s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); |
90 | void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); | 99 | void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); |
91 | void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); | 100 | void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); |
101 | s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); | ||
92 | 102 | ||
93 | #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) | 103 | #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) |
94 | 104 | ||
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c index 1bc57e52cee3..771d01a60d06 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c | |||
@@ -289,7 +289,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) | |||
289 | * Configure queue statistics registers, all queues belonging to same traffic | 289 | * Configure queue statistics registers, all queues belonging to same traffic |
290 | * class uses a single set of queue statistics counters. | 290 | * class uses a single set of queue statistics counters. |
291 | */ | 291 | */ |
292 | s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) | 292 | static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) |
293 | { | 293 | { |
294 | u32 reg = 0; | 294 | u32 reg = 0; |
295 | u8 i = 0; | 295 | u8 i = 0; |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c index 025af8c53ddb..d50cf78c234d 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c | |||
@@ -39,36 +39,52 @@ | |||
39 | */ | 39 | */ |
40 | static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba) | 40 | static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba) |
41 | { | 41 | { |
42 | s32 ret_val = 0; | 42 | int num_tcs = IXGBE_MAX_PACKET_BUFFERS; |
43 | u32 value = IXGBE_RXPBSIZE_64KB; | 43 | u32 rx_pb_size = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT; |
44 | u32 rxpktsize; | ||
45 | u32 txpktsize; | ||
46 | u32 txpbthresh; | ||
44 | u8 i = 0; | 47 | u8 i = 0; |
45 | 48 | ||
46 | /* Setup Rx packet buffer sizes */ | 49 | /* |
47 | switch (rx_pba) { | 50 | * This really means configure the first half of the TCs |
48 | case pba_80_48: | 51 | * (Traffic Classes) to use 5/8 of the Rx packet buffer |
49 | /* Setup the first four at 80KB */ | 52 | * space. To determine the size of the buffer for each TC, |
50 | value = IXGBE_RXPBSIZE_80KB; | 53 | * we are multiplying the average size by 5/4 and applying |
51 | for (; i < 4; i++) | 54 | * it to half of the traffic classes. |
52 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value); | 55 | */ |
53 | /* Setup the last four at 48KB...don't re-init i */ | 56 | if (rx_pba == pba_80_48) { |
54 | value = IXGBE_RXPBSIZE_48KB; | 57 | rxpktsize = (rx_pb_size * 5) / (num_tcs * 4); |
55 | /* Fall Through */ | 58 | rx_pb_size -= rxpktsize * (num_tcs / 2); |
56 | case pba_equal: | 59 | for (; i < (num_tcs / 2); i++) |
57 | default: | 60 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); |
58 | for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) | 61 | } |
59 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value); | 62 | |
60 | 63 | /* Divide the remaining Rx packet buffer evenly among the TCs */ | |
61 | /* Setup Tx packet buffer sizes */ | 64 | rxpktsize = rx_pb_size / (num_tcs - i); |
62 | for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { | 65 | for (; i < num_tcs; i++) |
63 | IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), | 66 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); |
64 | IXGBE_TXPBSIZE_20KB); | 67 | |
65 | IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), | 68 | /* |
66 | IXGBE_TXPBTHRESH_DCB); | 69 | * Setup Tx packet buffer and threshold equally for all TCs |
67 | } | 70 | * TXPBTHRESH register is set in K so divide by 1024 and subtract |
68 | break; | 71 | * 10 since the largest packet we support is just over 9K. |
72 | */ | ||
73 | txpktsize = IXGBE_TXPBSIZE_MAX / num_tcs; | ||
74 | txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; | ||
75 | for (i = 0; i < num_tcs; i++) { | ||
76 | IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); | ||
77 | IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); | ||
78 | } | ||
79 | |||
80 | /* Clear unused TCs, if any, to zero buffer size*/ | ||
81 | for (; i < MAX_TRAFFIC_CLASS; i++) { | ||
82 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); | ||
83 | IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); | ||
84 | IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); | ||
69 | } | 85 | } |
70 | 86 | ||
71 | return ret_val; | 87 | return 0; |
72 | } | 88 | } |
73 | 89 | ||
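Plugging in the usual 82599 numbers shows the computed split lands exactly where the old hard-coded constants did. Assuming a 512 KB Rx packet buffer and eight traffic classes:

    /*
     * rx_pb_size = 512 KB, num_tcs = 8 (illustrative 82599 values)
     *
     *   first half:  rxpktsize = (512 * 5) / (8 * 4) = 80 KB for TCs 0-3
     *   remainder:   512 - 4 * 80                    = 192 KB
     *   second half: rxpktsize = 192 / 4             = 48 KB for TCs 4-7
     *
     * i.e. the same 80 KB / 48 KB layout the removed IXGBE_RXPBSIZE_80KB /
     * IXGBE_RXPBSIZE_48KB constants produced, now derived from
     * hw->mac.rx_pb_size so it scales with the part.
     */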
74 | /** | 90 | /** |
@@ -285,12 +301,17 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en) | |||
285 | IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg); | 301 | IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg); |
286 | /* | 302 | /* |
287 | * Enable Receive PFC | 303 | * Enable Receive PFC |
288 | * We will always honor XOFF frames we receive when | 304 | * 82599 will always honor XOFF frames we receive when |
289 | * we are in PFC mode. | 305 | * we are in PFC mode however X540 only honors enabled |
306 | * traffic classes. | ||
290 | */ | 307 | */ |
291 | reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); | 308 | reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); |
292 | reg &= ~IXGBE_MFLCN_RFCE; | 309 | reg &= ~IXGBE_MFLCN_RFCE; |
293 | reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; | 310 | reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; |
311 | |||
312 | if (hw->mac.type == ixgbe_mac_X540) | ||
313 | reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; | ||
314 | |||
294 | IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); | 315 | IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); |
295 | 316 | ||
296 | } else { | 317 | } else { |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h index 148fd8b477a9..2de71a503153 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h | |||
@@ -92,8 +92,10 @@ | |||
92 | #define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ | 92 | #define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ |
93 | #define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ | 93 | #define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ |
94 | #define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ | 94 | #define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ |
95 | #define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/ | ||
95 | 96 | ||
96 | #define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */ | 97 | #define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */ |
98 | #define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ | ||
97 | 99 | ||
98 | /* SECTXMINIFG DCB */ | 100 | /* SECTXMINIFG DCB */ |
99 | #define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */ | 101 | #define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */ |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index 327c8614198c..5e7ed225851a 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c | |||
@@ -347,18 +347,28 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, | |||
347 | static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) | 347 | static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) |
348 | { | 348 | { |
349 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 349 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
350 | struct dcb_app app = { | ||
351 | .selector = DCB_APP_IDTYPE_ETHTYPE, | ||
352 | .protocol = ETH_P_FCOE, | ||
353 | }; | ||
354 | u8 up = dcb_getapp(netdev, &app); | ||
350 | int ret; | 355 | int ret; |
351 | 356 | ||
352 | if (!adapter->dcb_set_bitmap || | ||
353 | !(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) | ||
354 | return DCB_NO_HW_CHG; | ||
355 | |||
356 | ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, | 357 | ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, |
357 | MAX_TRAFFIC_CLASS); | 358 | MAX_TRAFFIC_CLASS); |
358 | |||
359 | if (ret) | 359 | if (ret) |
360 | return DCB_NO_HW_CHG; | 360 | return DCB_NO_HW_CHG; |
361 | 361 | ||
362 | /* In IEEE mode app data must be parsed into DCBX format for | ||
363 | * hardware routines. | ||
364 | */ | ||
365 | if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) | ||
366 | up = (1 << up); | ||
367 | |||
368 | #ifdef IXGBE_FCOE | ||
369 | if (up && (up != (1 << adapter->fcoe.up))) | ||
370 | adapter->dcb_set_bitmap |= BIT_APP_UPCHG; | ||
371 | |||
362 | /* | 372 | /* |
363 | * Only take down the adapter if an app change occurred. FCoE | 373 | * Only take down the adapter if an app change occurred. FCoE |
364 | * may shuffle tx rings in this case and this can not be done | 374 | * may shuffle tx rings in this case and this can not be done |
@@ -366,12 +376,15 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) | |||
366 | */ | 376 | */ |
367 | if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { | 377 | if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { |
368 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) | 378 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) |
369 | msleep(1); | 379 | usleep_range(1000, 2000); |
380 | |||
381 | ixgbe_fcoe_setapp(adapter, up); | ||
370 | 382 | ||
371 | if (netif_running(netdev)) | 383 | if (netif_running(netdev)) |
372 | netdev->netdev_ops->ndo_stop(netdev); | 384 | netdev->netdev_ops->ndo_stop(netdev); |
373 | ixgbe_clear_interrupt_scheme(adapter); | 385 | ixgbe_clear_interrupt_scheme(adapter); |
374 | } | 386 | } |
387 | #endif | ||
375 | 388 | ||
376 | if (adapter->dcb_cfg.pfc_mode_enable) { | 389 | if (adapter->dcb_cfg.pfc_mode_enable) { |
377 | switch (adapter->hw.mac.type) { | 390 | switch (adapter->hw.mac.type) { |
@@ -399,12 +412,14 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) | |||
399 | } | 412 | } |
400 | } | 413 | } |
401 | 414 | ||
415 | #ifdef IXGBE_FCOE | ||
402 | if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { | 416 | if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { |
403 | ixgbe_init_interrupt_scheme(adapter); | 417 | ixgbe_init_interrupt_scheme(adapter); |
404 | if (netif_running(netdev)) | 418 | if (netif_running(netdev)) |
405 | netdev->netdev_ops->ndo_open(netdev); | 419 | netdev->netdev_ops->ndo_open(netdev); |
406 | ret = DCB_HW_CHG_RST; | 420 | ret = DCB_HW_CHG_RST; |
407 | } | 421 | } |
422 | #endif | ||
408 | 423 | ||
409 | if (adapter->dcb_set_bitmap & BIT_PFC) { | 424 | if (adapter->dcb_set_bitmap & BIT_PFC) { |
410 | u8 pfc_en; | 425 | u8 pfc_en; |
@@ -558,68 +573,6 @@ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) | |||
558 | return dcb_getapp(netdev, &app); | 573 | return dcb_getapp(netdev, &app); |
559 | } | 574 | } |
560 | 575 | ||
561 | /** | ||
562 | * ixgbe_dcbnl_setapp - set the DCBX application user priority | ||
563 | * @netdev : the corresponding netdev | ||
564 | * @idtype : identifies the id as ether type or TCP/UDP port number | ||
565 | * @id: id is either ether type or TCP/UDP port number | ||
566 | * @up: the 802.1p user priority bitmap | ||
567 | * | ||
568 | * Returns : 0 on success or 1 on error | ||
569 | */ | ||
570 | static u8 ixgbe_dcbnl_setapp(struct net_device *netdev, | ||
571 | u8 idtype, u16 id, u8 up) | ||
572 | { | ||
573 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
574 | u8 rval = 1; | ||
575 | struct dcb_app app = { | ||
576 | .selector = idtype, | ||
577 | .protocol = id, | ||
578 | .priority = up | ||
579 | }; | ||
580 | |||
581 | if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) | ||
582 | return rval; | ||
583 | |||
584 | rval = dcb_setapp(netdev, &app); | ||
585 | |||
586 | switch (idtype) { | ||
587 | case DCB_APP_IDTYPE_ETHTYPE: | ||
588 | #ifdef IXGBE_FCOE | ||
589 | if (id == ETH_P_FCOE) { | ||
590 | u8 old_tc; | ||
591 | |||
592 | /* Get current programmed tc */ | ||
593 | old_tc = adapter->fcoe.tc; | ||
594 | rval = ixgbe_fcoe_setapp(adapter, up); | ||
595 | |||
596 | if (rval || | ||
597 | !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || | ||
598 | !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) | ||
599 | break; | ||
600 | |||
601 | /* The FCoE application priority may be changed multiple | ||
602 | * times in quick succession with switches that build up | ||
603 | * TLVs. To avoid creating uneeded device resets this | ||
604 | * checks the actual HW configuration and clears | ||
605 | * BIT_APP_UPCHG if a HW configuration change is not | ||
606 | * need | ||
607 | */ | ||
608 | if (old_tc == adapter->fcoe.tc) | ||
609 | adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG; | ||
610 | else | ||
611 | adapter->dcb_set_bitmap |= BIT_APP_UPCHG; | ||
612 | } | ||
613 | #endif | ||
614 | break; | ||
615 | case DCB_APP_IDTYPE_PORTNUM: | ||
616 | break; | ||
617 | default: | ||
618 | break; | ||
619 | } | ||
620 | return rval; | ||
621 | } | ||
622 | |||
623 | static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, | 576 | static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, |
624 | struct ieee_ets *ets) | 577 | struct ieee_ets *ets) |
625 | { | 578 | { |
@@ -745,25 +698,14 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, | |||
745 | 698 | ||
746 | if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) | 699 | if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) |
747 | return -EINVAL; | 700 | return -EINVAL; |
748 | #ifdef IXGBE_FCOE | ||
749 | if (app->selector == 1 && app->protocol == ETH_P_FCOE) { | ||
750 | if (adapter->fcoe.tc == app->priority) | ||
751 | goto setapp; | ||
752 | 701 | ||
753 | /* In IEEE mode map up to tc 1:1 */ | 702 | dcb_setapp(dev, app); |
754 | adapter->fcoe.tc = app->priority; | ||
755 | adapter->fcoe.up = app->priority; | ||
756 | 703 | ||
757 | /* Force hardware reset required to push FCoE | 704 | #ifdef IXGBE_FCOE |
758 | * setup on {tx|rx}_rings | 705 | if (app->selector == 1 && app->protocol == ETH_P_FCOE && |
759 | */ | 706 | adapter->fcoe.tc == app->priority) |
760 | adapter->dcb_set_bitmap |= BIT_APP_UPCHG; | ||
761 | ixgbe_dcbnl_set_all(dev); | 707 | ixgbe_dcbnl_set_all(dev); |
762 | } | ||
763 | |||
764 | setapp: | ||
765 | #endif | 708 | #endif |
766 | dcb_setapp(dev, app); | ||
767 | return 0; | 709 | return 0; |
768 | } | 710 | } |
769 | 711 | ||
@@ -838,7 +780,6 @@ const struct dcbnl_rtnl_ops dcbnl_ops = { | |||
838 | .getpfcstate = ixgbe_dcbnl_getpfcstate, | 780 | .getpfcstate = ixgbe_dcbnl_getpfcstate, |
839 | .setpfcstate = ixgbe_dcbnl_setpfcstate, | 781 | .setpfcstate = ixgbe_dcbnl_setpfcstate, |
840 | .getapp = ixgbe_dcbnl_getapp, | 782 | .getapp = ixgbe_dcbnl_getapp, |
841 | .setapp = ixgbe_dcbnl_setapp, | ||
842 | .getdcbx = ixgbe_dcbnl_getdcbx, | 783 | .getdcbx = ixgbe_dcbnl_getdcbx, |
843 | .setdcbx = ixgbe_dcbnl_setdcbx, | 784 | .setdcbx = ixgbe_dcbnl_setdcbx, |
844 | }; | 785 | }; |
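With the CEE setapp callback gone, ixgbe_dcbnl_set_all() recovers the FCoE user priority from the dcbnl core's app table itself; that is what the dcb_app lookup at the top of the function does. The shape of the lookup, as used in the hunk above:

    struct dcb_app app = {
            .selector = DCB_APP_IDTYPE_ETHTYPE,
            .protocol = ETH_P_FCOE,
    };
    u8 up = dcb_getapp(netdev, &app);   /* 802.1p priority, 0 if unset */

    /* IEEE mode hands the hardware a priority bitmap, not a raw value */
    if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
            up = 1 << up;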
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 76380a2b35aa..cb1555bc8548 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -84,6 +84,7 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { | |||
84 | {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, | 84 | {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, |
85 | {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, | 85 | {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, |
86 | {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, | 86 | {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, |
87 | {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, | ||
87 | {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, | 88 | {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, |
88 | {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, | 89 | {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, |
89 | {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, | 90 | {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, |
@@ -102,6 +103,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { | |||
102 | {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, | 103 | {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, |
103 | {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, | 104 | {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, |
104 | {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, | 105 | {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, |
106 | {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, | ||
107 | {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, | ||
108 | {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, | ||
109 | {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, | ||
105 | #ifdef IXGBE_FCOE | 110 | #ifdef IXGBE_FCOE |
106 | {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, | 111 | {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, |
107 | {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, | 112 | {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, |
@@ -288,20 +293,20 @@ static int ixgbe_get_settings(struct net_device *netdev, | |||
288 | if (link_up) { | 293 | if (link_up) { |
289 | switch (link_speed) { | 294 | switch (link_speed) { |
290 | case IXGBE_LINK_SPEED_10GB_FULL: | 295 | case IXGBE_LINK_SPEED_10GB_FULL: |
291 | ecmd->speed = SPEED_10000; | 296 | ethtool_cmd_speed_set(ecmd, SPEED_10000); |
292 | break; | 297 | break; |
293 | case IXGBE_LINK_SPEED_1GB_FULL: | 298 | case IXGBE_LINK_SPEED_1GB_FULL: |
294 | ecmd->speed = SPEED_1000; | 299 | ethtool_cmd_speed_set(ecmd, SPEED_1000); |
295 | break; | 300 | break; |
296 | case IXGBE_LINK_SPEED_100_FULL: | 301 | case IXGBE_LINK_SPEED_100_FULL: |
297 | ecmd->speed = SPEED_100; | 302 | ethtool_cmd_speed_set(ecmd, SPEED_100); |
298 | break; | 303 | break; |
299 | default: | 304 | default: |
300 | break; | 305 | break; |
301 | } | 306 | } |
302 | ecmd->duplex = DUPLEX_FULL; | 307 | ecmd->duplex = DUPLEX_FULL; |
303 | } else { | 308 | } else { |
304 | ecmd->speed = -1; | 309 | ethtool_cmd_speed_set(ecmd, -1); |
305 | ecmd->duplex = -1; | 310 | ecmd->duplex = -1; |
306 | } | 311 | } |
307 | 312 | ||
@@ -346,9 +351,10 @@ static int ixgbe_set_settings(struct net_device *netdev, | |||
346 | } | 351 | } |
347 | } else { | 352 | } else { |
348 | /* in this case we currently only support 10Gb/FULL */ | 353 | /* in this case we currently only support 10Gb/FULL */ |
354 | u32 speed = ethtool_cmd_speed(ecmd); | ||
349 | if ((ecmd->autoneg == AUTONEG_ENABLE) || | 355 | if ((ecmd->autoneg == AUTONEG_ENABLE) || |
350 | (ecmd->advertising != ADVERTISED_10000baseT_Full) || | 356 | (ecmd->advertising != ADVERTISED_10000baseT_Full) || |
351 | (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) | 357 | (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) |
352 | return -EINVAL; | 358 | return -EINVAL; |
353 | } | 359 | } |
354 | 360 | ||
@@ -846,11 +852,8 @@ static int ixgbe_get_eeprom(struct net_device *netdev, | |||
846 | if (!eeprom_buff) | 852 | if (!eeprom_buff) |
847 | return -ENOMEM; | 853 | return -ENOMEM; |
848 | 854 | ||
849 | for (i = 0; i < eeprom_len; i++) { | 855 | ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, |
850 | if ((ret_val = hw->eeprom.ops.read(hw, first_word + i, | 856 | eeprom_buff); |
851 | &eeprom_buff[i]))) | ||
852 | break; | ||
853 | } | ||
854 | 857 | ||
855 | /* Device's eeprom is always little-endian, word addressable */ | 858 | /* Device's eeprom is always little-endian, word addressable */ |
856 | for (i = 0; i < eeprom_len; i++) | 859 | for (i = 0; i < eeprom_len; i++) |
@@ -931,7 +934,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
931 | } | 934 | } |
932 | 935 | ||
933 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) | 936 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) |
934 | msleep(1); | 937 | usleep_range(1000, 2000); |
935 | 938 | ||
936 | if (!netif_running(adapter->netdev)) { | 939 | if (!netif_running(adapter->netdev)) { |
937 | for (i = 0; i < adapter->num_tx_queues; i++) | 940 | for (i = 0; i < adapter->num_tx_queues; i++) |
@@ -1030,9 +1033,6 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset) | |||
1030 | return IXGBE_TEST_LEN; | 1033 | return IXGBE_TEST_LEN; |
1031 | case ETH_SS_STATS: | 1034 | case ETH_SS_STATS: |
1032 | return IXGBE_STATS_LEN; | 1035 | return IXGBE_STATS_LEN; |
1033 | case ETH_SS_NTUPLE_FILTERS: | ||
1034 | return ETHTOOL_MAX_NTUPLE_LIST_ENTRY * | ||
1035 | ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY; | ||
1036 | default: | 1036 | default: |
1037 | return -EOPNOTSUPP; | 1037 | return -EOPNOTSUPP; |
1038 | } | 1038 | } |
@@ -1238,46 +1238,62 @@ static const struct ixgbe_reg_test reg_test_82598[] = { | |||
1238 | { 0, 0, 0, 0 } | 1238 | { 0, 0, 0, 0 } |
1239 | }; | 1239 | }; |
1240 | 1240 | ||
1241 | static const u32 register_test_patterns[] = { | 1241 | static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, |
1242 | 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF | 1242 | u32 mask, u32 write) |
1243 | }; | 1243 | { |
1244 | 1244 | u32 pat, val, before; | |
1245 | #define REG_PATTERN_TEST(R, M, W) \ | 1245 | static const u32 test_pattern[] = { |
1246 | { \ | 1246 | 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; |
1247 | u32 pat, val, before; \ | 1247 | |
1248 | for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \ | 1248 | for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { |
1249 | before = readl(adapter->hw.hw_addr + R); \ | 1249 | before = readl(adapter->hw.hw_addr + reg); |
1250 | writel((register_test_patterns[pat] & W), \ | 1250 | writel((test_pattern[pat] & write), |
1251 | (adapter->hw.hw_addr + R)); \ | 1251 | (adapter->hw.hw_addr + reg)); |
1252 | val = readl(adapter->hw.hw_addr + R); \ | 1252 | val = readl(adapter->hw.hw_addr + reg); |
1253 | if (val != (register_test_patterns[pat] & W & M)) { \ | 1253 | if (val != (test_pattern[pat] & write & mask)) { |
1254 | e_err(drv, "pattern test reg %04X failed: got " \ | 1254 | e_err(drv, "pattern test reg %04X failed: got " |
1255 | "0x%08X expected 0x%08X\n", \ | 1255 | "0x%08X expected 0x%08X\n", |
1256 | R, val, (register_test_patterns[pat] & W & M)); \ | 1256 | reg, val, (test_pattern[pat] & write & mask)); |
1257 | *data = R; \ | 1257 | *data = reg; |
1258 | writel(before, adapter->hw.hw_addr + R); \ | 1258 | writel(before, adapter->hw.hw_addr + reg); |
1259 | return 1; \ | 1259 | return 1; |
1260 | } \ | 1260 | } |
1261 | writel(before, adapter->hw.hw_addr + R); \ | 1261 | writel(before, adapter->hw.hw_addr + reg); |
1262 | } \ | 1262 | } |
1263 | return 0; | ||
1263 | } | 1264 | } |
1264 | 1265 | ||
1265 | #define REG_SET_AND_CHECK(R, M, W) \ | 1266 | static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, |
1266 | { \ | 1267 | u32 mask, u32 write) |
1267 | u32 val, before; \ | 1268 | { |
1268 | before = readl(adapter->hw.hw_addr + R); \ | 1269 | u32 val, before; |
1269 | writel((W & M), (adapter->hw.hw_addr + R)); \ | 1270 | before = readl(adapter->hw.hw_addr + reg); |
1270 | val = readl(adapter->hw.hw_addr + R); \ | 1271 | writel((write & mask), (adapter->hw.hw_addr + reg)); |
1271 | if ((W & M) != (val & M)) { \ | 1272 | val = readl(adapter->hw.hw_addr + reg); |
1272 | e_err(drv, "set/check reg %04X test failed: got 0x%08X " \ | 1273 | if ((write & mask) != (val & mask)) { |
1273 | "expected 0x%08X\n", R, (val & M), (W & M)); \ | 1274 | e_err(drv, "set/check reg %04X test failed: got 0x%08X " |
1274 | *data = R; \ | 1275 | "expected 0x%08X\n", reg, (val & mask), (write & mask)); |
1275 | writel(before, (adapter->hw.hw_addr + R)); \ | 1276 | *data = reg; |
1276 | return 1; \ | 1277 | writel(before, (adapter->hw.hw_addr + reg)); |
1277 | } \ | 1278 | return 1; |
1278 | writel(before, (adapter->hw.hw_addr + R)); \ | 1279 | } |
1280 | writel(before, (adapter->hw.hw_addr + reg)); | ||
1281 | return 0; | ||
1279 | } | 1282 | } |
1280 | 1283 | ||
1284 | #define REG_PATTERN_TEST(reg, mask, write) \ | ||
1285 | do { \ | ||
1286 | if (reg_pattern_test(adapter, data, reg, mask, write)) \ | ||
1287 | return 1; \ | ||
1288 | } while (0) \ | ||
1289 | |||
1290 | |||
1291 | #define REG_SET_AND_CHECK(reg, mask, write) \ | ||
1292 | do { \ | ||
1293 | if (reg_set_and_check(adapter, data, reg, mask, write)) \ | ||
1294 | return 1; \ | ||
1295 | } while (0) \ | ||
1296 | |||
1281 | static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) | 1297 | static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) |
1282 | { | 1298 | { |
1283 | const struct ixgbe_reg_test *test; | 1299 | const struct ixgbe_reg_test *test; |
@@ -1328,13 +1344,13 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1328 | switch (test->test_type) { | 1344 | switch (test->test_type) { |
1329 | case PATTERN_TEST: | 1345 | case PATTERN_TEST: |
1330 | REG_PATTERN_TEST(test->reg + (i * 0x40), | 1346 | REG_PATTERN_TEST(test->reg + (i * 0x40), |
1331 | test->mask, | 1347 | test->mask, |
1332 | test->write); | 1348 | test->write); |
1333 | break; | 1349 | break; |
1334 | case SET_READ_TEST: | 1350 | case SET_READ_TEST: |
1335 | REG_SET_AND_CHECK(test->reg + (i * 0x40), | 1351 | REG_SET_AND_CHECK(test->reg + (i * 0x40), |
1336 | test->mask, | 1352 | test->mask, |
1337 | test->write); | 1353 | test->write); |
1338 | break; | 1354 | break; |
1339 | case WRITE_NO_TEST: | 1355 | case WRITE_NO_TEST: |
1340 | writel(test->write, | 1356 | writel(test->write, |
@@ -1343,18 +1359,18 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1343 | break; | 1359 | break; |
1344 | case TABLE32_TEST: | 1360 | case TABLE32_TEST: |
1345 | REG_PATTERN_TEST(test->reg + (i * 4), | 1361 | REG_PATTERN_TEST(test->reg + (i * 4), |
1346 | test->mask, | 1362 | test->mask, |
1347 | test->write); | 1363 | test->write); |
1348 | break; | 1364 | break; |
1349 | case TABLE64_TEST_LO: | 1365 | case TABLE64_TEST_LO: |
1350 | REG_PATTERN_TEST(test->reg + (i * 8), | 1366 | REG_PATTERN_TEST(test->reg + (i * 8), |
1351 | test->mask, | 1367 | test->mask, |
1352 | test->write); | 1368 | test->write); |
1353 | break; | 1369 | break; |
1354 | case TABLE64_TEST_HI: | 1370 | case TABLE64_TEST_HI: |
1355 | REG_PATTERN_TEST((test->reg + 4) + (i * 8), | 1371 | REG_PATTERN_TEST((test->reg + 4) + (i * 8), |
1356 | test->mask, | 1372 | test->mask, |
1357 | test->write); | 1373 | test->write); |
1358 | break; | 1374 | break; |
1359 | } | 1375 | } |
1360 | } | 1376 | } |
@@ -1417,7 +1433,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1417 | 1433 | ||
1418 | /* Disable all the interrupts */ | 1434 | /* Disable all the interrupts */ |
1419 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); | 1435 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); |
1420 | msleep(10); | 1436 | usleep_range(10000, 20000); |
1421 | 1437 | ||
1422 | /* Test each interrupt */ | 1438 | /* Test each interrupt */ |
1423 | for (; i < 10; i++) { | 1439 | for (; i < 10; i++) { |
@@ -1437,7 +1453,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1437 | ~mask & 0x00007FFF); | 1453 | ~mask & 0x00007FFF); |
1438 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, | 1454 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, |
1439 | ~mask & 0x00007FFF); | 1455 | ~mask & 0x00007FFF); |
1440 | msleep(10); | 1456 | usleep_range(10000, 20000); |
1441 | 1457 | ||
1442 | if (adapter->test_icr & mask) { | 1458 | if (adapter->test_icr & mask) { |
1443 | *data = 3; | 1459 | *data = 3; |
@@ -1454,7 +1470,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1454 | adapter->test_icr = 0; | 1470 | adapter->test_icr = 0; |
1455 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); | 1471 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); |
1456 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); | 1472 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); |
1457 | msleep(10); | 1473 | usleep_range(10000, 20000); |
1458 | 1474 | ||
1459 | if (!(adapter->test_icr &mask)) { | 1475 | if (!(adapter->test_icr &mask)) { |
1460 | *data = 4; | 1476 | *data = 4; |
@@ -1474,7 +1490,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1474 | ~mask & 0x00007FFF); | 1490 | ~mask & 0x00007FFF); |
1475 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, | 1491 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, |
1476 | ~mask & 0x00007FFF); | 1492 | ~mask & 0x00007FFF); |
1477 | msleep(10); | 1493 | usleep_range(10000, 20000); |
1478 | 1494 | ||
1479 | if (adapter->test_icr) { | 1495 | if (adapter->test_icr) { |
1480 | *data = 5; | 1496 | *data = 5; |
@@ -1485,7 +1501,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1485 | 1501 | ||
1486 | /* Disable all the interrupts */ | 1502 | /* Disable all the interrupts */ |
1487 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); | 1503 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); |
1488 | msleep(10); | 1504 | usleep_range(10000, 20000); |
1489 | 1505 | ||
1490 | /* Unhook test interrupt handler */ | 1506 | /* Unhook test interrupt handler */ |
1491 | free_irq(irq, netdev); | 1507 | free_irq(irq, netdev); |
@@ -1598,6 +1614,13 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) | |||
1598 | struct ixgbe_hw *hw = &adapter->hw; | 1614 | struct ixgbe_hw *hw = &adapter->hw; |
1599 | u32 reg_data; | 1615 | u32 reg_data; |
1600 | 1616 | ||
1617 | /* X540 needs to set the MACC.FLU bit to force link up */ | ||
1618 | if (adapter->hw.mac.type == ixgbe_mac_X540) { | ||
1619 | reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MACC); | ||
1620 | reg_data |= IXGBE_MACC_FLU; | ||
1621 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_MACC, reg_data); | ||
1622 | } | ||
1623 | |||
1601 | /* right now we only support MAC loopback in the driver */ | 1624 | /* right now we only support MAC loopback in the driver */ |
1602 | reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); | 1625 | reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); |
1603 | /* Setup MAC loopback */ | 1626 | /* Setup MAC loopback */ |
@@ -1613,7 +1636,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) | |||
1613 | reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; | 1636 | reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU; |
1614 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data); | 1637 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data); |
1615 | IXGBE_WRITE_FLUSH(&adapter->hw); | 1638 | IXGBE_WRITE_FLUSH(&adapter->hw); |
1616 | msleep(10); | 1639 | usleep_range(10000, 20000); |
1617 | 1640 | ||
1618 | /* Disable Atlas Tx lanes; re-enabled in reset path */ | 1641 | /* Disable Atlas Tx lanes; re-enabled in reset path */ |
1619 | if (hw->mac.type == ixgbe_mac_82598EB) { | 1642 | if (hw->mac.type == ixgbe_mac_82598EB) { |
@@ -1999,25 +2022,30 @@ static int ixgbe_nway_reset(struct net_device *netdev) | |||
1999 | return 0; | 2022 | return 0; |
2000 | } | 2023 | } |
2001 | 2024 | ||
2002 | static int ixgbe_phys_id(struct net_device *netdev, u32 data) | 2025 | static int ixgbe_set_phys_id(struct net_device *netdev, |
2026 | enum ethtool_phys_id_state state) | ||
2003 | { | 2027 | { |
2004 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 2028 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
2005 | struct ixgbe_hw *hw = &adapter->hw; | 2029 | struct ixgbe_hw *hw = &adapter->hw; |
2006 | u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | ||
2007 | u32 i; | ||
2008 | 2030 | ||
2009 | if (!data || data > 300) | 2031 | switch (state) { |
2010 | data = 300; | 2032 | case ETHTOOL_ID_ACTIVE: |
2033 | adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | ||
2034 | return 2; | ||
2011 | 2035 | ||
2012 | for (i = 0; i < (data * 1000); i += 400) { | 2036 | case ETHTOOL_ID_ON: |
2013 | hw->mac.ops.led_on(hw, IXGBE_LED_ON); | 2037 | hw->mac.ops.led_on(hw, IXGBE_LED_ON); |
2014 | msleep_interruptible(200); | 2038 | break; |
2039 | |||
2040 | case ETHTOOL_ID_OFF: | ||
2015 | hw->mac.ops.led_off(hw, IXGBE_LED_ON); | 2041 | hw->mac.ops.led_off(hw, IXGBE_LED_ON); |
2016 | msleep_interruptible(200); | 2042 | break; |
2017 | } | ||
2018 | 2043 | ||
2019 | /* Restore LED settings */ | 2044 | case ETHTOOL_ID_INACTIVE: |
2020 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg); | 2045 | /* Restore LED settings */ |
2046 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); | ||
2047 | break; | ||
2048 | } | ||
2021 | 2049 | ||
2022 | return 0; | 2050 | return 0; |
2023 | } | 2051 | } |
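The replacement also changes who does the timing: a positive value returned from the ETHTOOL_ID_ACTIVE call tells the ethtool core how many times per second the LED should blink, and the core then alternates the ETHTOOL_ID_ON / ETHTOOL_ID_OFF callbacks on that schedule, so returning 2 here asks for a 2 Hz blink with no msleep_interruptible() loop left in the driver. The callback contract reduced to its shape (assumed semantics of the then-current ethtool core, not ixgbe-specific code):

    switch (state) {
    case ETHTOOL_ID_ACTIVE:
            /* save the current LED register */
            return 2;               /* blink at 2 Hz; core drives ON/OFF */
    case ETHTOOL_ID_ON:
            /* force the identify LED on */
            break;
    case ETHTOOL_ID_OFF:
            /* force the identify LED off */
            break;
    case ETHTOOL_ID_INACTIVE:
            /* restore the saved LED state */
            break;
    }
    return 0;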
@@ -2230,8 +2258,13 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) | |||
2230 | need_reset = (data & ETH_FLAG_RXVLAN) != | 2258 | need_reset = (data & ETH_FLAG_RXVLAN) != |
2231 | (netdev->features & NETIF_F_HW_VLAN_RX); | 2259 | (netdev->features & NETIF_F_HW_VLAN_RX); |
2232 | 2260 | ||
2261 | if ((data & ETH_FLAG_RXHASH) && | ||
2262 | !(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) | ||
2263 | return -EOPNOTSUPP; | ||
2264 | |||
2233 | rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE | | 2265 | rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE | |
2234 | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN); | 2266 | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | |
2267 | ETH_FLAG_RXHASH); | ||
2235 | if (rc) | 2268 | if (rc) |
2236 | return rc; | 2269 | return rc; |
2237 | 2270 | ||
@@ -2465,7 +2498,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { | |||
2465 | .set_tso = ixgbe_set_tso, | 2498 | .set_tso = ixgbe_set_tso, |
2466 | .self_test = ixgbe_diag_test, | 2499 | .self_test = ixgbe_diag_test, |
2467 | .get_strings = ixgbe_get_strings, | 2500 | .get_strings = ixgbe_get_strings, |
2468 | .phys_id = ixgbe_phys_id, | 2501 | .set_phys_id = ixgbe_set_phys_id, |
2469 | .get_sset_count = ixgbe_get_sset_count, | 2502 | .get_sset_count = ixgbe_get_sset_count, |
2470 | .get_ethtool_stats = ixgbe_get_ethtool_stats, | 2503 | .get_ethtool_stats = ixgbe_get_ethtool_stats, |
2471 | .get_coalesce = ixgbe_get_coalesce, | 2504 | .get_coalesce = ixgbe_get_coalesce, |
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index dba7d77588ef..05920726e824 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c | |||
@@ -416,8 +416,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, | |||
416 | if (!ddp->udl) | 416 | if (!ddp->udl) |
417 | goto ddp_out; | 417 | goto ddp_out; |
418 | 418 | ||
419 | ddp->err = (fcerr | fceofe); | 419 | if (fcerr | fceofe) |
420 | if (ddp->err) | ||
421 | goto ddp_out; | 420 | goto ddp_out; |
422 | 421 | ||
423 | fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT); | 422 | fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT); |
@@ -428,6 +427,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, | |||
428 | if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) { | 427 | if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) { |
429 | pci_unmap_sg(adapter->pdev, ddp->sgl, | 428 | pci_unmap_sg(adapter->pdev, ddp->sgl, |
430 | ddp->sgc, DMA_FROM_DEVICE); | 429 | ddp->sgc, DMA_FROM_DEVICE); |
430 | ddp->err = (fcerr | fceofe); | ||
431 | ddp->sgl = NULL; | 431 | ddp->sgl = NULL; |
432 | ddp->sgc = 0; | 432 | ddp->sgc = 0; |
433 | } | 433 | } |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e145f2c455cb..fa01b0b03b77 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -51,8 +51,12 @@ | |||
51 | char ixgbe_driver_name[] = "ixgbe"; | 51 | char ixgbe_driver_name[] = "ixgbe"; |
52 | static const char ixgbe_driver_string[] = | 52 | static const char ixgbe_driver_string[] = |
53 | "Intel(R) 10 Gigabit PCI Express Network Driver"; | 53 | "Intel(R) 10 Gigabit PCI Express Network Driver"; |
54 | 54 | #define MAJ 3 | |
55 | #define DRV_VERSION "3.2.9-k2" | 55 | #define MIN 3 |
56 | #define BUILD 8 | ||
57 | #define KFIX 2 | ||
58 | #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ | ||
59 | __stringify(BUILD) "-k" __stringify(KFIX) | ||
56 | const char ixgbe_driver_version[] = DRV_VERSION; | 60 | const char ixgbe_driver_version[] = DRV_VERSION; |
57 | static const char ixgbe_copyright[] = | 61 | static const char ixgbe_copyright[] = |
58 | "Copyright (c) 1999-2011 Intel Corporation."; | 62 | "Copyright (c) 1999-2011 Intel Corporation."; |
@@ -120,6 +124,10 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { | |||
120 | board_82599 }, | 124 | board_82599 }, |
121 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), | 125 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), |
122 | board_X540 }, | 126 | board_X540 }, |
127 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), | ||
128 | board_82599 }, | ||
129 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), | ||
130 | board_82599 }, | ||
123 | 131 | ||
124 | /* required last entry */ | 132 | /* required last entry */ |
125 | {0, } | 133 | {0, } |
@@ -185,6 +193,22 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) | |||
185 | adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; | 193 | adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; |
186 | } | 194 | } |
187 | 195 | ||
196 | static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) | ||
197 | { | ||
198 | if (!test_bit(__IXGBE_DOWN, &adapter->state) && | ||
199 | !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) | ||
200 | schedule_work(&adapter->service_task); | ||
201 | } | ||
202 | |||
203 | static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) | ||
204 | { | ||
205 | BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); | ||
206 | |||
207 | /* flush memory to make sure state is correct before next watchdog */ | ||
208 | smp_mb__before_clear_bit(); | ||
209 | clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); | ||
210 | } | ||
211 | |||
188 | struct ixgbe_reg_info { | 212 | struct ixgbe_reg_info { |
189 | u32 ofs; | 213 | u32 ofs; |
190 | char *name; | 214 | char *name; |
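ixgbe_service_event_schedule() / ixgbe_service_event_complete() funnel all deferred work through one service task: the schedule side refuses to queue while the adapter is going down and uses test_and_set_bit() so at most one run is ever pending, and the completion side clears __IXGBE_SERVICE_SCHED only after a memory barrier so a schedule request racing with completion is not lost. Producers elsewhere in this patch follow the same two-step pattern, for example the Tx-timeout path further down:

    /* mark what needs doing, then kick the single service task */
    adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
    ixgbe_service_event_schedule(adapter);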
@@ -811,7 +835,19 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) | |||
811 | #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ | 835 | #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ |
812 | MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ | 836 | MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ |
813 | 837 | ||
814 | static void ixgbe_tx_timeout(struct net_device *netdev); | 838 | /** |
839 | * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout | ||
840 | * @adapter: driver private struct | ||
841 | **/ | ||
842 | static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) | ||
843 | { | ||
844 | |||
845 | /* Do the reset outside of interrupt context */ | ||
846 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { | ||
847 | adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; | ||
848 | ixgbe_service_event_schedule(adapter); | ||
849 | } | ||
850 | } | ||
815 | 851 | ||
816 | /** | 852 | /** |
817 | * ixgbe_clean_tx_irq - Reclaim resources after transmit completes | 853 | * ixgbe_clean_tx_irq - Reclaim resources after transmit completes |
@@ -893,7 +929,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, | |||
893 | adapter->tx_timeout_count + 1, tx_ring->queue_index); | 929 | adapter->tx_timeout_count + 1, tx_ring->queue_index); |
894 | 930 | ||
895 | /* schedule immediate reset if we believe we hung */ | 931 | /* schedule immediate reset if we believe we hung */ |
896 | ixgbe_tx_timeout(adapter->netdev); | 932 | ixgbe_tx_timeout_reset(adapter); |
897 | 933 | ||
898 | /* the adapter is about to reset, no point in enabling stuff */ | 934 | /* the adapter is about to reset, no point in enabling stuff */ |
899 | return true; | 935 | return true; |
@@ -943,8 +979,6 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, | |||
943 | rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; | 979 | rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; |
944 | rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; | 980 | rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; |
945 | rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); | 981 | rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); |
946 | rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | | ||
947 | IXGBE_DCA_RXCTRL_DESC_HSRO_EN); | ||
948 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); | 982 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); |
949 | } | 983 | } |
950 | 984 | ||
@@ -962,7 +996,6 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, | |||
962 | txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; | 996 | txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; |
963 | txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | 997 | txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); |
964 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; | 998 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; |
965 | txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; | ||
966 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); | 999 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); |
967 | break; | 1000 | break; |
968 | case ixgbe_mac_82599EB: | 1001 | case ixgbe_mac_82599EB: |
@@ -972,7 +1005,6 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, | |||
972 | txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << | 1005 | txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << |
973 | IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); | 1006 | IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); |
974 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; | 1007 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; |
975 | txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; | ||
976 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl); | 1008 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl); |
977 | break; | 1009 | break; |
978 | default: | 1010 | default: |
@@ -1061,8 +1093,14 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) | |||
1061 | 1093 | ||
1062 | return 0; | 1094 | return 0; |
1063 | } | 1095 | } |
1064 | |||
1065 | #endif /* CONFIG_IXGBE_DCA */ | 1096 | #endif /* CONFIG_IXGBE_DCA */ |
1097 | |||
1098 | static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc, | ||
1099 | struct sk_buff *skb) | ||
1100 | { | ||
1101 | skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); | ||
1102 | } | ||
1103 | |||
1066 | /** | 1104 | /** |
1067 | * ixgbe_receive_skb - Send a completed packet up the stack | 1105 | * ixgbe_receive_skb - Send a completed packet up the stack |
1068 | * @adapter: board private structure | 1106 | * @adapter: board private structure |
@@ -1454,6 +1492,8 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1454 | } | 1492 | } |
1455 | 1493 | ||
1456 | ixgbe_rx_checksum(adapter, rx_desc, skb); | 1494 | ixgbe_rx_checksum(adapter, rx_desc, skb); |
1495 | if (adapter->netdev->features & NETIF_F_RXHASH) | ||
1496 | ixgbe_rx_hash(rx_desc, skb); | ||
1457 | 1497 | ||
1458 | /* probably a little skewed due to removing CRC */ | 1498 | /* probably a little skewed due to removing CRC */ |
1459 | total_rx_bytes += skb->len; | 1499 | total_rx_bytes += skb->len; |
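With NETIF_F_RXHASH set, the new ixgbe_rx_hash() helper copies the RSS hash the hardware already computed from the Rx descriptor into skb->rxhash, so the stack (RPS/RFS and friends) can steer the packet without hashing it again in software; the ethtool hunk earlier in this diff gates the ETH_FLAG_RXHASH toggle on RSS being enabled for the same reason. The guard as used in the receive path above:

    if (adapter->netdev->features & NETIF_F_RXHASH)
            ixgbe_rx_hash(rx_desc, skb);    /* skb->rxhash = descriptor RSS hash */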
@@ -1787,35 +1827,51 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) | |||
1787 | } | 1827 | } |
1788 | 1828 | ||
1789 | /** | 1829 | /** |
1790 | * ixgbe_check_overtemp_task - worker thread to check over temperature | 1830 | * ixgbe_check_overtemp_subtask - check for over temperature |
1791 | * @work: pointer to work_struct containing our data | 1831 | * @adapter: pointer to adapter |
1792 | **/ | 1832 | **/ |
1793 | static void ixgbe_check_overtemp_task(struct work_struct *work) | 1833 | static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) |
1794 | { | 1834 | { |
1795 | struct ixgbe_adapter *adapter = container_of(work, | ||
1796 | struct ixgbe_adapter, | ||
1797 | check_overtemp_task); | ||
1798 | struct ixgbe_hw *hw = &adapter->hw; | 1835 | struct ixgbe_hw *hw = &adapter->hw; |
1799 | u32 eicr = adapter->interrupt_event; | 1836 | u32 eicr = adapter->interrupt_event; |
1800 | 1837 | ||
1801 | if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) | 1838 | if (test_bit(__IXGBE_DOWN, &adapter->state)) |
1802 | return; | 1839 | return; |
1803 | 1840 | ||
1841 | if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | ||
1842 | !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) | ||
1843 | return; | ||
1844 | |||
1845 | adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; | ||
1846 | |||
1804 | switch (hw->device_id) { | 1847 | switch (hw->device_id) { |
1805 | case IXGBE_DEV_ID_82599_T3_LOM: { | 1848 | case IXGBE_DEV_ID_82599_T3_LOM: |
1806 | u32 autoneg; | 1849 | /* |
1807 | bool link_up = false; | 1850 | * Since the warning interrupt is for both ports |
1851 | * we don't have to check if: | ||
1852 | * - This interrupt wasn't for our port. | ||
1853 | * - We may have missed the interrupt so always have to | ||
1854 | * check if we got a LSC | ||
1855 | */ | ||
1856 | if (!(eicr & IXGBE_EICR_GPI_SDP0) && | ||
1857 | !(eicr & IXGBE_EICR_LSC)) | ||
1858 | return; | ||
1859 | |||
1860 | if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { | ||
1861 | u32 autoneg; | ||
1862 | bool link_up = false; | ||
1808 | 1863 | ||
1809 | if (hw->mac.ops.check_link) | ||
1810 | hw->mac.ops.check_link(hw, &autoneg, &link_up, false); | 1864 | hw->mac.ops.check_link(hw, &autoneg, &link_up, false); |
1811 | 1865 | ||
1812 | if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) || | 1866 | if (link_up) |
1813 | (eicr & IXGBE_EICR_LSC)) | 1867 | return; |
1814 | /* Check if this is due to overtemp */ | 1868 | } |
1815 | if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) | 1869 | |
1816 | break; | 1870 | /* Check if this is not due to overtemp */ |
1817 | return; | 1871 | if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) |
1818 | } | 1872 | return; |
1873 | |||
1874 | break; | ||
1819 | default: | 1875 | default: |
1820 | if (!(eicr & IXGBE_EICR_GPI_SDP0)) | 1876 | if (!(eicr & IXGBE_EICR_GPI_SDP0)) |
1821 | return; | 1877 | return; |
@@ -1825,8 +1881,8 @@ static void ixgbe_check_overtemp_task(struct work_struct *work) | |||
1825 | "Network adapter has been stopped because it has over heated. " | 1881 | "Network adapter has been stopped because it has over heated. " |
1826 | "Restart the computer. If the problem persists, " | 1882 | "Restart the computer. If the problem persists, " |
1827 | "power off the system and replace the adapter\n"); | 1883 | "power off the system and replace the adapter\n"); |
1828 | /* write to clear the interrupt */ | 1884 | |
1829 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); | 1885 | adapter->interrupt_event = 0; |
1830 | } | 1886 | } |
1831 | 1887 | ||
1832 | static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) | 1888 | static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) |
@@ -1848,15 +1904,19 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) | |||
1848 | if (eicr & IXGBE_EICR_GPI_SDP2) { | 1904 | if (eicr & IXGBE_EICR_GPI_SDP2) { |
1849 | /* Clear the interrupt */ | 1905 | /* Clear the interrupt */ |
1850 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); | 1906 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); |
1851 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 1907 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
1852 | schedule_work(&adapter->sfp_config_module_task); | 1908 | adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; |
1909 | ixgbe_service_event_schedule(adapter); | ||
1910 | } | ||
1853 | } | 1911 | } |
1854 | 1912 | ||
1855 | if (eicr & IXGBE_EICR_GPI_SDP1) { | 1913 | if (eicr & IXGBE_EICR_GPI_SDP1) { |
1856 | /* Clear the interrupt */ | 1914 | /* Clear the interrupt */ |
1857 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); | 1915 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); |
1858 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 1916 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
1859 | schedule_work(&adapter->multispeed_fiber_task); | 1917 | adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; |
1918 | ixgbe_service_event_schedule(adapter); | ||
1919 | } | ||
1860 | } | 1920 | } |
1861 | } | 1921 | } |
1862 | 1922 | ||
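Both GPI sources in the hunk above now just set a flag in the adapter and call ixgbe_service_event_schedule() instead of queueing a dedicated work item. That helper is defined elsewhere in the file and is not part of this hunk; its likely shape, shown here only as a hedged sketch, is a single work item guarded by a state bit so the service task is queued at most once until it has run and called ixgbe_service_event_complete():

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
        /* queue the service task only if it isn't already pending */
        if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
            !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
                schedule_work(&adapter->service_task);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
        /* let the next event (timer or interrupt) queue the task again */
        smp_mb__before_clear_bit();
        clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}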
@@ -1870,7 +1930,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) | |||
1870 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { | 1930 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
1871 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); | 1931 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); |
1872 | IXGBE_WRITE_FLUSH(hw); | 1932 | IXGBE_WRITE_FLUSH(hw); |
1873 | schedule_work(&adapter->watchdog_task); | 1933 | ixgbe_service_event_schedule(adapter); |
1874 | } | 1934 | } |
1875 | } | 1935 | } |
1876 | 1936 | ||
@@ -1898,26 +1958,32 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) | |||
1898 | 1958 | ||
1899 | switch (hw->mac.type) { | 1959 | switch (hw->mac.type) { |
1900 | case ixgbe_mac_82599EB: | 1960 | case ixgbe_mac_82599EB: |
1901 | ixgbe_check_sfp_event(adapter, eicr); | ||
1902 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | ||
1903 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { | ||
1904 | adapter->interrupt_event = eicr; | ||
1905 | schedule_work(&adapter->check_overtemp_task); | ||
1906 | } | ||
1907 | /* now fallthrough to handle Flow Director */ | ||
1908 | case ixgbe_mac_X540: | 1961 | case ixgbe_mac_X540: |
1909 | /* Handle Flow Director Full threshold interrupt */ | 1962 | /* Handle Flow Director Full threshold interrupt */ |
1910 | if (eicr & IXGBE_EICR_FLOW_DIR) { | 1963 | if (eicr & IXGBE_EICR_FLOW_DIR) { |
1964 | int reinit_count = 0; | ||
1911 | int i; | 1965 | int i; |
1912 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR); | ||
1913 | /* Disable transmits before FDIR Re-initialization */ | ||
1914 | netif_tx_stop_all_queues(netdev); | ||
1915 | for (i = 0; i < adapter->num_tx_queues; i++) { | 1966 | for (i = 0; i < adapter->num_tx_queues; i++) { |
1916 | struct ixgbe_ring *tx_ring = | 1967 | struct ixgbe_ring *ring = adapter->tx_ring[i]; |
1917 | adapter->tx_ring[i]; | ||
1918 | if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, | 1968 | if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, |
1919 | &tx_ring->state)) | 1969 | &ring->state)) |
1920 | schedule_work(&adapter->fdir_reinit_task); | 1970 | reinit_count++; |
1971 | } | ||
1972 | if (reinit_count) { | ||
1973 | /* no more flow director interrupts until after init */ | ||
1974 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); | ||
1975 | eicr &= ~IXGBE_EICR_FLOW_DIR; | ||
1976 | adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; | ||
1977 | ixgbe_service_event_schedule(adapter); | ||
1978 | } | ||
1979 | } | ||
1980 | ixgbe_check_sfp_event(adapter, eicr); | ||
1981 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | ||
1982 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { | ||
1983 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { | ||
1984 | adapter->interrupt_event = eicr; | ||
1985 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; | ||
1986 | ixgbe_service_event_schedule(adapter); | ||
1921 | } | 1987 | } |
1922 | } | 1988 | } |
1923 | break; | 1989 | break; |
@@ -1927,8 +1993,10 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) | |||
1927 | 1993 | ||
1928 | ixgbe_check_fan_failure(adapter, eicr); | 1994 | ixgbe_check_fan_failure(adapter, eicr); |
1929 | 1995 | ||
1996 | /* re-enable the original interrupt state, no lsc, no queues */ | ||
1930 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 1997 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
1931 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); | 1998 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr & |
1999 | ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE)); | ||
1932 | 2000 | ||
1933 | return IRQ_HANDLED; | 2001 | return IRQ_HANDLED; |
1934 | } | 2002 | } |
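The rewritten re-enable at the end of this handler arms only the cause bits that were just read from EICR, minus link-status-change and the per-queue bits: queue vectors are re-armed from their own NAPI completion paths, and LSC stays masked until the watchdog subtask has consumed the event. A small standalone illustration of the mask arithmetic, using made-up bit positions rather than the real register layout:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit layout, only for illustrating the masking */
#define DEMO_EIMS_RTX_QUEUE  0x0000ffffu   /* per-queue cause bits */
#define DEMO_EIMS_LSC        (1u << 20)    /* link status change */
#define DEMO_EIMS_FLOW_DIR   (1u << 16)    /* some other cause */

int main(void)
{
        uint32_t eicr = DEMO_EIMS_LSC | DEMO_EIMS_FLOW_DIR | 0x3u;
        uint32_t eims = eicr & ~(DEMO_EIMS_LSC | DEMO_EIMS_RTX_QUEUE);

        /* only the "other" causes (here FLOW_DIR) are re-enabled */
        printf("eicr=0x%08x -> eims=0x%08x\n", eicr, eims);
        return 0;
}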
@@ -2513,8 +2581,11 @@ static irqreturn_t ixgbe_intr(int irq, void *data) | |||
2513 | ixgbe_check_sfp_event(adapter, eicr); | 2581 | ixgbe_check_sfp_event(adapter, eicr); |
2514 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | 2582 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && |
2515 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { | 2583 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { |
2516 | adapter->interrupt_event = eicr; | 2584 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
2517 | schedule_work(&adapter->check_overtemp_task); | 2585 | adapter->interrupt_event = eicr; |
2586 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; | ||
2587 | ixgbe_service_event_schedule(adapter); | ||
2588 | } | ||
2518 | } | 2589 | } |
2519 | break; | 2590 | break; |
2520 | default: | 2591 | default: |
@@ -2731,7 +2802,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, | |||
2731 | 2802 | ||
2732 | /* poll to verify queue is enabled */ | 2803 | /* poll to verify queue is enabled */ |
2733 | do { | 2804 | do { |
2734 | msleep(1); | 2805 | usleep_range(1000, 2000); |
2735 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); | 2806 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); |
2736 | } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); | 2807 | } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); |
2737 | if (!wait_loop) | 2808 | if (!wait_loop) |
@@ -3023,7 +3094,7 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, | |||
3023 | return; | 3094 | return; |
3024 | 3095 | ||
3025 | do { | 3096 | do { |
3026 | msleep(1); | 3097 | usleep_range(1000, 2000); |
3027 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | 3098 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); |
3028 | } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); | 3099 | } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); |
3029 | 3100 | ||
@@ -3178,7 +3249,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) | |||
3178 | /* enable Tx loopback for VF/PF communication */ | 3249 | /* enable Tx loopback for VF/PF communication */ |
3179 | IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); | 3250 | IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); |
3180 | /* Enable MAC Anti-Spoofing */ | 3251 | /* Enable MAC Anti-Spoofing */ |
3181 | hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), | 3252 | hw->mac.ops.set_mac_anti_spoofing(hw, |
3253 | (adapter->antispoofing_enabled = | ||
3254 | (adapter->num_vfs != 0)), | ||
3182 | adapter->num_vfs); | 3255 | adapter->num_vfs); |
3183 | } | 3256 | } |
3184 | 3257 | ||
@@ -3487,7 +3560,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev) | |||
3487 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 3560 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
3488 | struct ixgbe_hw *hw = &adapter->hw; | 3561 | struct ixgbe_hw *hw = &adapter->hw; |
3489 | unsigned int vfn = adapter->num_vfs; | 3562 | unsigned int vfn = adapter->num_vfs; |
3490 | unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1); | 3563 | unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS; |
3491 | int count = 0; | 3564 | int count = 0; |
3492 | 3565 | ||
3493 | /* return ENOMEM indicating insufficient memory for addresses */ | 3566 | /* return ENOMEM indicating insufficient memory for addresses */ |
@@ -3760,31 +3833,16 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) | |||
3760 | **/ | 3833 | **/ |
3761 | static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) | 3834 | static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) |
3762 | { | 3835 | { |
3763 | struct ixgbe_hw *hw = &adapter->hw; | 3836 | /* |
3837 | * We are assuming the worst case scenario here, and that | ||
3838 | * is that an SFP was inserted/removed after the reset | ||
3839 | * but before SFP detection was enabled. As such the best | ||
3840 | * solution is to just start searching as soon as we start up | ||
3841 | */ | ||
3842 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) | ||
3843 | adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; | ||
3764 | 3844 | ||
3765 | if (hw->phy.multispeed_fiber) { | 3845 | adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; |
3766 | /* | ||
3767 | * In multispeed fiber setups, the device may not have | ||
3768 | * had a physical connection when the driver loaded. | ||
3769 | * If that's the case, the initial link configuration | ||
3770 | * couldn't get the MAC into 10G or 1G mode, so we'll | ||
3771 | * never have a link status change interrupt fire. | ||
3772 | * We need to try and force an autonegotiation | ||
3773 | * session, then bring up link. | ||
3774 | */ | ||
3775 | if (hw->mac.ops.setup_sfp) | ||
3776 | hw->mac.ops.setup_sfp(hw); | ||
3777 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | ||
3778 | schedule_work(&adapter->multispeed_fiber_task); | ||
3779 | } else { | ||
3780 | /* | ||
3781 | * Direct Attach Cu and non-multispeed fiber modules | ||
3782 | * still need to be configured properly prior to | ||
3783 | * attempting link. | ||
3784 | */ | ||
3785 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK)) | ||
3786 | schedule_work(&adapter->sfp_config_module_task); | ||
3787 | } | ||
3788 | } | 3846 | } |
3789 | 3847 | ||
3790 | /** | 3848 | /** |
@@ -3860,9 +3918,10 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) | |||
3860 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) | 3918 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) |
3861 | gpie |= IXGBE_SDP1_GPIEN; | 3919 | gpie |= IXGBE_SDP1_GPIEN; |
3862 | 3920 | ||
3863 | if (hw->mac.type == ixgbe_mac_82599EB) | 3921 | if (hw->mac.type == ixgbe_mac_82599EB) { |
3864 | gpie |= IXGBE_SDP1_GPIEN; | 3922 | gpie |= IXGBE_SDP1_GPIEN; |
3865 | gpie |= IXGBE_SDP2_GPIEN; | 3923 | gpie |= IXGBE_SDP2_GPIEN; |
3924 | } | ||
3866 | 3925 | ||
3867 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); | 3926 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); |
3868 | } | 3927 | } |
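The added braces in this hunk are a behavioural fix, not just style: in the old form only the SDP1 enable was conditional on the 82599 MAC, and the SDP2 (module-detect) GPI was switched on for every MAC type. A tiny standalone demo of the difference, using stand-in bit values:

#include <stdio.h>

int main(void)
{
        int is_82599 = 0;               /* e.g. an 82598-based board */
        unsigned int gpie;

        /* old shape: the second |= is not covered by the if */
        gpie = 0;
        if (is_82599)
                gpie |= 1u << 1;        /* SDP1 */
        gpie |= 1u << 2;                /* SDP2 - ran unconditionally */
        printf("old gpie = %#x\n", gpie);       /* 0x4 even on 82598 */

        /* new shape: both enables gated on the MAC type */
        gpie = 0;
        if (is_82599) {
                gpie |= 1u << 1;
                gpie |= 1u << 2;
        }
        printf("new gpie = %#x\n", gpie);       /* 0 on 82598 */
        return 0;
}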
@@ -3913,17 +3972,6 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
3913 | e_crit(drv, "Fan has stopped, replace the adapter\n"); | 3972 | e_crit(drv, "Fan has stopped, replace the adapter\n"); |
3914 | } | 3973 | } |
3915 | 3974 | ||
3916 | /* | ||
3917 | * For hot-pluggable SFP+ devices, a new SFP+ module may have | ||
3918 | * arrived before interrupts were enabled but after probe. Such | ||
3919 | * devices wouldn't have their type identified yet. We need to | ||
3920 | * kick off the SFP+ module setup first, then try to bring up link. | ||
3921 | * If we're not hot-pluggable SFP+, we just need to configure link | ||
3922 | * and bring it up. | ||
3923 | */ | ||
3924 | if (hw->phy.type == ixgbe_phy_none) | ||
3925 | schedule_work(&adapter->sfp_config_module_task); | ||
3926 | |||
3927 | /* enable transmits */ | 3975 | /* enable transmits */ |
3928 | netif_tx_start_all_queues(adapter->netdev); | 3976 | netif_tx_start_all_queues(adapter->netdev); |
3929 | 3977 | ||
@@ -3931,7 +3979,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
3931 | * link up interrupt but shouldn't be a problem */ | 3979 | * link up interrupt but shouldn't be a problem */ |
3932 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; | 3980 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; |
3933 | adapter->link_check_timeout = jiffies; | 3981 | adapter->link_check_timeout = jiffies; |
3934 | mod_timer(&adapter->watchdog_timer, jiffies); | 3982 | mod_timer(&adapter->service_timer, jiffies); |
3935 | 3983 | ||
3936 | /* Set PF Reset Done bit so PF/VF Mail Ops can work */ | 3984 | /* Set PF Reset Done bit so PF/VF Mail Ops can work */ |
3937 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); | 3985 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); |
@@ -3944,8 +3992,11 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
3944 | void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) | 3992 | void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) |
3945 | { | 3993 | { |
3946 | WARN_ON(in_interrupt()); | 3994 | WARN_ON(in_interrupt()); |
3995 | /* put off any impending NetWatchDogTimeout */ | ||
3996 | adapter->netdev->trans_start = jiffies; | ||
3997 | |||
3947 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) | 3998 | while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) |
3948 | msleep(1); | 3999 | usleep_range(1000, 2000); |
3949 | ixgbe_down(adapter); | 4000 | ixgbe_down(adapter); |
3950 | /* | 4001 | /* |
3951 | * If SR-IOV enabled then wait a bit before bringing the adapter | 4002 | * If SR-IOV enabled then wait a bit before bringing the adapter |
@@ -3972,10 +4023,20 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) | |||
3972 | struct ixgbe_hw *hw = &adapter->hw; | 4023 | struct ixgbe_hw *hw = &adapter->hw; |
3973 | int err; | 4024 | int err; |
3974 | 4025 | ||
4026 | /* lock SFP init bit to prevent race conditions with the watchdog */ | ||
4027 | while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) | ||
4028 | usleep_range(1000, 2000); | ||
4029 | |||
4030 | /* clear all SFP and link config related flags while holding SFP_INIT */ | ||
4031 | adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | | ||
4032 | IXGBE_FLAG2_SFP_NEEDS_RESET); | ||
4033 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; | ||
4034 | |||
3975 | err = hw->mac.ops.init_hw(hw); | 4035 | err = hw->mac.ops.init_hw(hw); |
3976 | switch (err) { | 4036 | switch (err) { |
3977 | case 0: | 4037 | case 0: |
3978 | case IXGBE_ERR_SFP_NOT_PRESENT: | 4038 | case IXGBE_ERR_SFP_NOT_PRESENT: |
4039 | case IXGBE_ERR_SFP_NOT_SUPPORTED: | ||
3979 | break; | 4040 | break; |
3980 | case IXGBE_ERR_MASTER_REQUESTS_PENDING: | 4041 | case IXGBE_ERR_MASTER_REQUESTS_PENDING: |
3981 | e_dev_err("master disable timed out\n"); | 4042 | e_dev_err("master disable timed out\n"); |
@@ -3993,6 +4054,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) | |||
3993 | e_dev_err("Hardware Error: %d\n", err); | 4054 | e_dev_err("Hardware Error: %d\n", err); |
3994 | } | 4055 | } |
3995 | 4056 | ||
4057 | clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); | ||
4058 | |||
3996 | /* reprogram the RAR[0] in case user changed it. */ | 4059 | /* reprogram the RAR[0] in case user changed it. */ |
3997 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, | 4060 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, |
3998 | IXGBE_RAH_AV); | 4061 | IXGBE_RAH_AV); |
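ixgbe_reset() now serializes against the SFP subtasks through a bit in adapter->state rather than a mutex: test_and_set_bit() either takes the "lock" or reports that someone else holds it. The reset path spins with a short sleep until it wins; the service subtasks further down in this patch instead give up and retry on the next service event. A hedged sketch of the two usages (illustrative fragments around a critical section, not driver code):

/* blocking side, e.g. ixgbe_reset(): wait until SFP init is free */
static void demo_sfp_lock_blocking(struct ixgbe_adapter *adapter)
{
        while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
                usleep_range(1000, 2000);

        /* ... re-init the hardware ... */

        clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}

/* non-blocking side, e.g. the SFP subtasks: try once, else come back later */
static void demo_sfp_lock_trylock(struct ixgbe_adapter *adapter)
{
        if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
                return;         /* someone else is in init */

        /* ... identify the module / set up link ... */

        clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}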
@@ -4121,26 +4184,12 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
4121 | struct net_device *netdev = adapter->netdev; | 4184 | struct net_device *netdev = adapter->netdev; |
4122 | struct ixgbe_hw *hw = &adapter->hw; | 4185 | struct ixgbe_hw *hw = &adapter->hw; |
4123 | u32 rxctrl; | 4186 | u32 rxctrl; |
4124 | u32 txdctl; | ||
4125 | int i; | 4187 | int i; |
4126 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 4188 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
4127 | 4189 | ||
4128 | /* signal that we are down to the interrupt handler */ | 4190 | /* signal that we are down to the interrupt handler */ |
4129 | set_bit(__IXGBE_DOWN, &adapter->state); | 4191 | set_bit(__IXGBE_DOWN, &adapter->state); |
4130 | 4192 | ||
4131 | /* disable receive for all VFs and wait one second */ | ||
4132 | if (adapter->num_vfs) { | ||
4133 | /* ping all the active vfs to let them know we are going down */ | ||
4134 | ixgbe_ping_all_vfs(adapter); | ||
4135 | |||
4136 | /* Disable all VFTE/VFRE TX/RX */ | ||
4137 | ixgbe_disable_tx_rx(adapter); | ||
4138 | |||
4139 | /* Mark all the VFs as inactive */ | ||
4140 | for (i = 0 ; i < adapter->num_vfs; i++) | ||
4141 | adapter->vfinfo[i].clear_to_send = 0; | ||
4142 | } | ||
4143 | |||
4144 | /* disable receives */ | 4193 | /* disable receives */ |
4145 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | 4194 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); |
4146 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); | 4195 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); |
@@ -4150,15 +4199,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
4150 | /* this call also flushes the previous write */ | 4199 | /* this call also flushes the previous write */ |
4151 | ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); | 4200 | ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); |
4152 | 4201 | ||
4153 | msleep(10); | 4202 | usleep_range(10000, 20000); |
4154 | 4203 | ||
4155 | netif_tx_stop_all_queues(netdev); | 4204 | netif_tx_stop_all_queues(netdev); |
4156 | 4205 | ||
4157 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | 4206 | /* call carrier off first to avoid false dev_watchdog timeouts */ |
4158 | del_timer_sync(&adapter->sfp_timer); | ||
4159 | del_timer_sync(&adapter->watchdog_timer); | ||
4160 | cancel_work_sync(&adapter->watchdog_task); | ||
4161 | |||
4162 | netif_carrier_off(netdev); | 4207 | netif_carrier_off(netdev); |
4163 | netif_tx_disable(netdev); | 4208 | netif_tx_disable(netdev); |
4164 | 4209 | ||
@@ -4166,6 +4211,25 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
4166 | 4211 | ||
4167 | ixgbe_napi_disable_all(adapter); | 4212 | ixgbe_napi_disable_all(adapter); |
4168 | 4213 | ||
4214 | adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | | ||
4215 | IXGBE_FLAG2_RESET_REQUESTED); | ||
4216 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; | ||
4217 | |||
4218 | del_timer_sync(&adapter->service_timer); | ||
4219 | |||
4220 | /* disable receive for all VFs and wait one second */ | ||
4221 | if (adapter->num_vfs) { | ||
4222 | /* ping all the active vfs to let them know we are going down */ | ||
4223 | ixgbe_ping_all_vfs(adapter); | ||
4224 | |||
4225 | /* Disable all VFTE/VFRE TX/RX */ | ||
4226 | ixgbe_disable_tx_rx(adapter); | ||
4227 | |||
4228 | /* Mark all the VFs as inactive */ | ||
4229 | for (i = 0 ; i < adapter->num_vfs; i++) | ||
4230 | adapter->vfinfo[i].clear_to_send = 0; | ||
4231 | } | ||
4232 | |||
4169 | /* Cleanup the affinity_hint CPU mask memory and callback */ | 4233 | /* Cleanup the affinity_hint CPU mask memory and callback */ |
4170 | for (i = 0; i < num_q_vectors; i++) { | 4234 | for (i = 0; i < num_q_vectors; i++) { |
4171 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; | 4235 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; |
@@ -4175,21 +4239,13 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
4175 | free_cpumask_var(q_vector->affinity_mask); | 4239 | free_cpumask_var(q_vector->affinity_mask); |
4176 | } | 4240 | } |
4177 | 4241 | ||
4178 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
4179 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | ||
4180 | cancel_work_sync(&adapter->fdir_reinit_task); | ||
4181 | |||
4182 | if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) | ||
4183 | cancel_work_sync(&adapter->check_overtemp_task); | ||
4184 | |||
4185 | /* disable transmits in the hardware now that interrupts are off */ | 4242 | /* disable transmits in the hardware now that interrupts are off */ |
4186 | for (i = 0; i < adapter->num_tx_queues; i++) { | 4243 | for (i = 0; i < adapter->num_tx_queues; i++) { |
4187 | u8 reg_idx = adapter->tx_ring[i]->reg_idx; | 4244 | u8 reg_idx = adapter->tx_ring[i]->reg_idx; |
4188 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); | 4245 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); |
4189 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), | ||
4190 | (txdctl & ~IXGBE_TXDCTL_ENABLE)); | ||
4191 | } | 4246 | } |
4192 | /* Disable the Tx DMA engine on 82599 */ | 4247 | |
4248 | /* Disable the Tx DMA engine on 82599 and X540 */ | ||
4193 | switch (hw->mac.type) { | 4249 | switch (hw->mac.type) { |
4194 | case ixgbe_mac_82599EB: | 4250 | case ixgbe_mac_82599EB: |
4195 | case ixgbe_mac_X540: | 4251 | case ixgbe_mac_X540: |
@@ -4201,9 +4257,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
4201 | break; | 4257 | break; |
4202 | } | 4258 | } |
4203 | 4259 | ||
4204 | /* clear n-tuple filters that are cached */ | ||
4205 | ethtool_ntuple_flush(netdev); | ||
4206 | |||
4207 | if (!pci_channel_offline(adapter->pdev)) | 4260 | if (!pci_channel_offline(adapter->pdev)) |
4208 | ixgbe_reset(adapter); | 4261 | ixgbe_reset(adapter); |
4209 | 4262 | ||
@@ -4267,25 +4320,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev) | |||
4267 | { | 4320 | { |
4268 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 4321 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
4269 | 4322 | ||
4270 | adapter->tx_timeout_count++; | ||
4271 | |||
4272 | /* Do the reset outside of interrupt context */ | 4323 | /* Do the reset outside of interrupt context */ |
4273 | schedule_work(&adapter->reset_task); | 4324 | ixgbe_tx_timeout_reset(adapter); |
4274 | } | ||
4275 | |||
4276 | static void ixgbe_reset_task(struct work_struct *work) | ||
4277 | { | ||
4278 | struct ixgbe_adapter *adapter; | ||
4279 | adapter = container_of(work, struct ixgbe_adapter, reset_task); | ||
4280 | |||
4281 | /* If we're already down or resetting, just bail */ | ||
4282 | if (test_bit(__IXGBE_DOWN, &adapter->state) || | ||
4283 | test_bit(__IXGBE_RESETTING, &adapter->state)) | ||
4284 | return; | ||
4285 | |||
4286 | ixgbe_dump(adapter); | ||
4287 | netdev_err(adapter->netdev, "Reset adapter\n"); | ||
4288 | ixgbe_reinit_locked(adapter); | ||
4289 | } | 4325 | } |
4290 | 4326 | ||
4291 | /** | 4327 | /** |
@@ -4567,8 +4603,8 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) | |||
4567 | #ifdef CONFIG_IXGBE_DCB | 4603 | #ifdef CONFIG_IXGBE_DCB |
4568 | 4604 | ||
4569 | /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ | 4605 | /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ |
4570 | void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, | 4606 | static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, |
4571 | unsigned int *tx, unsigned int *rx) | 4607 | unsigned int *tx, unsigned int *rx) |
4572 | { | 4608 | { |
4573 | struct net_device *dev = adapter->netdev; | 4609 | struct net_device *dev = adapter->netdev; |
4574 | struct ixgbe_hw *hw = &adapter->hw; | 4610 | struct ixgbe_hw *hw = &adapter->hw; |
@@ -5133,57 +5169,6 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | |||
5133 | } | 5169 | } |
5134 | 5170 | ||
5135 | /** | 5171 | /** |
5136 | * ixgbe_sfp_timer - worker thread to find a missing module | ||
5137 | * @data: pointer to our adapter struct | ||
5138 | **/ | ||
5139 | static void ixgbe_sfp_timer(unsigned long data) | ||
5140 | { | ||
5141 | struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; | ||
5142 | |||
5143 | /* | ||
5144 | * Do the sfp_timer outside of interrupt context due to the | ||
5145 | * delays that sfp+ detection requires | ||
5146 | */ | ||
5147 | schedule_work(&adapter->sfp_task); | ||
5148 | } | ||
5149 | |||
5150 | /** | ||
5151 | * ixgbe_sfp_task - worker thread to find a missing module | ||
5152 | * @work: pointer to work_struct containing our data | ||
5153 | **/ | ||
5154 | static void ixgbe_sfp_task(struct work_struct *work) | ||
5155 | { | ||
5156 | struct ixgbe_adapter *adapter = container_of(work, | ||
5157 | struct ixgbe_adapter, | ||
5158 | sfp_task); | ||
5159 | struct ixgbe_hw *hw = &adapter->hw; | ||
5160 | |||
5161 | if ((hw->phy.type == ixgbe_phy_nl) && | ||
5162 | (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { | ||
5163 | s32 ret = hw->phy.ops.identify_sfp(hw); | ||
5164 | if (ret == IXGBE_ERR_SFP_NOT_PRESENT) | ||
5165 | goto reschedule; | ||
5166 | ret = hw->phy.ops.reset(hw); | ||
5167 | if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { | ||
5168 | e_dev_err("failed to initialize because an unsupported " | ||
5169 | "SFP+ module type was detected.\n"); | ||
5170 | e_dev_err("Reload the driver after installing a " | ||
5171 | "supported module.\n"); | ||
5172 | unregister_netdev(adapter->netdev); | ||
5173 | } else { | ||
5174 | e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); | ||
5175 | } | ||
5176 | /* don't need this routine any more */ | ||
5177 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | ||
5178 | } | ||
5179 | return; | ||
5180 | reschedule: | ||
5181 | if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state)) | ||
5182 | mod_timer(&adapter->sfp_timer, | ||
5183 | round_jiffies(jiffies + (2 * HZ))); | ||
5184 | } | ||
5185 | |||
5186 | /** | ||
5187 | * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) | 5172 | * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) |
5188 | * @adapter: board private structure to initialize | 5173 | * @adapter: board private structure to initialize |
5189 | * | 5174 | * |
@@ -5899,8 +5884,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
5899 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); | 5884 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); |
5900 | hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); | 5885 | hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); |
5901 | break; | 5886 | break; |
5902 | case ixgbe_mac_82599EB: | ||
5903 | case ixgbe_mac_X540: | 5887 | case ixgbe_mac_X540: |
5888 | /* OS2BMC stats are X540 only */ | ||
5889 | hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); | ||
5890 | hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); | ||
5891 | hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); | ||
5892 | hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); | ||
5893 | case ixgbe_mac_82599EB: | ||
5904 | hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); | 5894 | hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); |
5905 | IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ | 5895 | IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ |
5906 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); | 5896 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); |
@@ -5974,23 +5964,66 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
5974 | } | 5964 | } |
5975 | 5965 | ||
5976 | /** | 5966 | /** |
5977 | * ixgbe_watchdog - Timer Call-back | 5967 | * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table |
5978 | * @data: pointer to adapter cast into an unsigned long | 5968 | * @adapter - pointer to the device adapter structure |
5979 | **/ | 5969 | **/ |
5980 | static void ixgbe_watchdog(unsigned long data) | 5970 | static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) |
5981 | { | 5971 | { |
5982 | struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; | ||
5983 | struct ixgbe_hw *hw = &adapter->hw; | 5972 | struct ixgbe_hw *hw = &adapter->hw; |
5984 | u64 eics = 0; | ||
5985 | int i; | 5973 | int i; |
5986 | 5974 | ||
5987 | /* | 5975 | if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) |
5988 | * Do the watchdog outside of interrupt context due to the lovely | 5976 | return; |
5989 | * delays that some of the newer hardware requires | 5977 | |
5990 | */ | 5978 | adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; |
5991 | 5979 | ||
5980 | /* if interface is down do nothing */ | ||
5992 | if (test_bit(__IXGBE_DOWN, &adapter->state)) | 5981 | if (test_bit(__IXGBE_DOWN, &adapter->state)) |
5993 | goto watchdog_short_circuit; | 5982 | return; |
5983 | |||
5984 | /* do nothing if we are not using signature filters */ | ||
5985 | if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) | ||
5986 | return; | ||
5987 | |||
5988 | adapter->fdir_overflow++; | ||
5989 | |||
5990 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { | ||
5991 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
5992 | set_bit(__IXGBE_TX_FDIR_INIT_DONE, | ||
5993 | &(adapter->tx_ring[i]->state)); | ||
5994 | /* re-enable flow director interrupts */ | ||
5995 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); | ||
5996 | } else { | ||
5997 | e_err(probe, "failed to finish FDIR re-initialization, " | ||
5998 | "ignored adding FDIR ATR filters\n"); | ||
5999 | } | ||
6000 | } | ||
6001 | |||
6002 | /** | ||
6003 | * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts | ||
6004 | * @adapter - pointer to the device adapter structure | ||
6005 | * | ||
6006 | * This function serves two purposes. First it strobes the interrupt lines | ||
6007 | * in order to make certain interrupts are occurring. Secondly it sets the | ||
6008 | * bits needed to check for TX hangs. As a result we should immediately | ||
6009 | * determine if a hang has occurred. | ||
6010 | */ | ||
6011 | static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) | ||
6012 | { | ||
6013 | struct ixgbe_hw *hw = &adapter->hw; | ||
6014 | u64 eics = 0; | ||
6015 | int i; | ||
6016 | |||
6017 | /* If we're down or resetting, just bail */ | ||
6018 | if (test_bit(__IXGBE_DOWN, &adapter->state) || | ||
6019 | test_bit(__IXGBE_RESETTING, &adapter->state)) | ||
6020 | return; | ||
6021 | |||
6022 | /* Force detection of hung controller */ | ||
6023 | if (netif_carrier_ok(adapter->netdev)) { | ||
6024 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
6025 | set_check_for_tx_hang(adapter->tx_ring[i]); | ||
6026 | } | ||
5994 | 6027 | ||
5995 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { | 6028 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { |
5996 | /* | 6029 | /* |
@@ -6000,108 +6033,172 @@ static void ixgbe_watchdog(unsigned long data) | |||
6000 | */ | 6033 | */ |
6001 | IXGBE_WRITE_REG(hw, IXGBE_EICS, | 6034 | IXGBE_WRITE_REG(hw, IXGBE_EICS, |
6002 | (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); | 6035 | (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); |
6003 | goto watchdog_reschedule; | 6036 | } else { |
6004 | } | 6037 | /* get one bit for every active tx/rx interrupt vector */ |
6005 | 6038 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { | |
6006 | /* get one bit for every active tx/rx interrupt vector */ | 6039 | struct ixgbe_q_vector *qv = adapter->q_vector[i]; |
6007 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { | 6040 | if (qv->rxr_count || qv->txr_count) |
6008 | struct ixgbe_q_vector *qv = adapter->q_vector[i]; | 6041 | eics |= ((u64)1 << i); |
6009 | if (qv->rxr_count || qv->txr_count) | 6042 | } |
6010 | eics |= ((u64)1 << i); | ||
6011 | } | 6043 | } |
6012 | 6044 | ||
6013 | /* Cause software interrupt to ensure rx rings are cleaned */ | 6045 | /* Cause software interrupt to ensure rings are cleaned */ |
6014 | ixgbe_irq_rearm_queues(adapter, eics); | 6046 | ixgbe_irq_rearm_queues(adapter, eics); |
6015 | 6047 | ||
6016 | watchdog_reschedule: | ||
6017 | /* Reset the timer */ | ||
6018 | mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); | ||
6019 | |||
6020 | watchdog_short_circuit: | ||
6021 | schedule_work(&adapter->watchdog_task); | ||
6022 | } | 6048 | } |
6023 | 6049 | ||
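In the MSI-X case the hang check above builds a mask with one bit per vector that actually owns Tx or Rx rings and writes it to EICS, which fires a software interrupt on those vectors so their clean-up paths run and the Tx-hang detection bits get evaluated. The per-vector bit selection is plain bit arithmetic; a standalone toy version:

#include <stdint.h>
#include <stdio.h>

/* Toy model: strobe only vectors that own rings, leave idle ones alone. */
struct demo_q_vector { int rxr_count, txr_count; };

int main(void)
{
        struct demo_q_vector qv[4] = { {1, 1}, {0, 0}, {2, 0}, {0, 1} };
        uint64_t eics = 0;
        int i;

        for (i = 0; i < 4; i++)
                if (qv[i].rxr_count || qv[i].txr_count)
                        eics |= (uint64_t)1 << i;

        printf("eics = 0x%llx\n", (unsigned long long)eics);   /* 0xd */
        return 0;
}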
6024 | /** | 6050 | /** |
6025 | * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber | 6051 | * ixgbe_watchdog_update_link - update the link status |
6026 | * @work: pointer to work_struct containing our data | 6052 | * @adapter - pointer to the device adapter structure |
6053 | * @link_speed - pointer to a u32 to store the link_speed | ||
6027 | **/ | 6054 | **/ |
6028 | static void ixgbe_multispeed_fiber_task(struct work_struct *work) | 6055 | static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) |
6029 | { | 6056 | { |
6030 | struct ixgbe_adapter *adapter = container_of(work, | ||
6031 | struct ixgbe_adapter, | ||
6032 | multispeed_fiber_task); | ||
6033 | struct ixgbe_hw *hw = &adapter->hw; | 6057 | struct ixgbe_hw *hw = &adapter->hw; |
6034 | u32 autoneg; | 6058 | u32 link_speed = adapter->link_speed; |
6035 | bool negotiation; | 6059 | bool link_up = adapter->link_up; |
6060 | int i; | ||
6036 | 6061 | ||
6037 | adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; | 6062 | if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) |
6038 | autoneg = hw->phy.autoneg_advertised; | 6063 | return; |
6039 | if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) | 6064 | |
6040 | hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); | 6065 | if (hw->mac.ops.check_link) { |
6041 | hw->mac.autotry_restart = false; | 6066 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); |
6042 | if (hw->mac.ops.setup_link) | 6067 | } else { |
6043 | hw->mac.ops.setup_link(hw, autoneg, negotiation, true); | 6068 | /* always assume link is up, if no check link function */ |
6044 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; | 6069 | link_speed = IXGBE_LINK_SPEED_10GB_FULL; |
6045 | adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK; | 6070 | link_up = true; |
6071 | } | ||
6072 | if (link_up) { | ||
6073 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
6074 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) | ||
6075 | hw->mac.ops.fc_enable(hw, i); | ||
6076 | } else { | ||
6077 | hw->mac.ops.fc_enable(hw, 0); | ||
6078 | } | ||
6079 | } | ||
6080 | |||
6081 | if (link_up || | ||
6082 | time_after(jiffies, (adapter->link_check_timeout + | ||
6083 | IXGBE_TRY_LINK_TIMEOUT))) { | ||
6084 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; | ||
6085 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); | ||
6086 | IXGBE_WRITE_FLUSH(hw); | ||
6087 | } | ||
6088 | |||
6089 | adapter->link_up = link_up; | ||
6090 | adapter->link_speed = link_speed; | ||
6046 | } | 6091 | } |
6047 | 6092 | ||
6048 | /** | 6093 | /** |
6049 | * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module | 6094 | * ixgbe_watchdog_link_is_up - update netif_carrier status and |
6050 | * @work: pointer to work_struct containing our data | 6095 | * print link up message |
6096 | * @adapter - pointer to the device adapter structure | ||
6051 | **/ | 6097 | **/ |
6052 | static void ixgbe_sfp_config_module_task(struct work_struct *work) | 6098 | static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) |
6053 | { | 6099 | { |
6054 | struct ixgbe_adapter *adapter = container_of(work, | 6100 | struct net_device *netdev = adapter->netdev; |
6055 | struct ixgbe_adapter, | ||
6056 | sfp_config_module_task); | ||
6057 | struct ixgbe_hw *hw = &adapter->hw; | 6101 | struct ixgbe_hw *hw = &adapter->hw; |
6058 | u32 err; | 6102 | u32 link_speed = adapter->link_speed; |
6103 | bool flow_rx, flow_tx; | ||
6059 | 6104 | ||
6060 | adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; | 6105 | /* only continue if link was previously down */ |
6106 | if (netif_carrier_ok(netdev)) | ||
6107 | return; | ||
6061 | 6108 | ||
6062 | /* Time for electrical oscillations to settle down */ | 6109 | adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; |
6063 | msleep(100); | ||
6064 | err = hw->phy.ops.identify_sfp(hw); | ||
6065 | 6110 | ||
6066 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 6111 | switch (hw->mac.type) { |
6067 | e_dev_err("failed to initialize because an unsupported SFP+ " | 6112 | case ixgbe_mac_82598EB: { |
6068 | "module type was detected.\n"); | 6113 | u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
6069 | e_dev_err("Reload the driver after installing a supported " | 6114 | u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); |
6070 | "module.\n"); | 6115 | flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); |
6071 | unregister_netdev(adapter->netdev); | 6116 | flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); |
6072 | return; | 6117 | } |
6118 | break; | ||
6119 | case ixgbe_mac_X540: | ||
6120 | case ixgbe_mac_82599EB: { | ||
6121 | u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); | ||
6122 | u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); | ||
6123 | flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); | ||
6124 | flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); | ||
6125 | } | ||
6126 | break; | ||
6127 | default: | ||
6128 | flow_tx = false; | ||
6129 | flow_rx = false; | ||
6130 | break; | ||
6073 | } | 6131 | } |
6074 | if (hw->mac.ops.setup_sfp) | 6132 | e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", |
6075 | hw->mac.ops.setup_sfp(hw); | 6133 | (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? |
6134 | "10 Gbps" : | ||
6135 | (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? | ||
6136 | "1 Gbps" : | ||
6137 | (link_speed == IXGBE_LINK_SPEED_100_FULL ? | ||
6138 | "100 Mbps" : | ||
6139 | "unknown speed"))), | ||
6140 | ((flow_rx && flow_tx) ? "RX/TX" : | ||
6141 | (flow_rx ? "RX" : | ||
6142 | (flow_tx ? "TX" : "None")))); | ||
6076 | 6143 | ||
6077 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | 6144 | netif_carrier_on(netdev); |
6078 | /* This will also work for DA Twinax connections */ | 6145 | #ifdef HAVE_IPLINK_VF_CONFIG |
6079 | schedule_work(&adapter->multispeed_fiber_task); | 6146 | ixgbe_check_vf_rate_limit(adapter); |
6080 | adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK; | 6147 | #endif /* HAVE_IPLINK_VF_CONFIG */ |
6081 | } | 6148 | } |
6082 | 6149 | ||
6083 | /** | 6150 | /** |
6084 | * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table | 6151 | * ixgbe_watchdog_link_is_down - update netif_carrier status and |
6085 | * @work: pointer to work_struct containing our data | 6152 | * print link down message |
6153 | * @adapter - pointer to the adapter structure | ||
6086 | **/ | 6154 | **/ |
6087 | static void ixgbe_fdir_reinit_task(struct work_struct *work) | 6155 | static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) |
6088 | { | 6156 | { |
6089 | struct ixgbe_adapter *adapter = container_of(work, | 6157 | struct net_device *netdev = adapter->netdev; |
6090 | struct ixgbe_adapter, | ||
6091 | fdir_reinit_task); | ||
6092 | struct ixgbe_hw *hw = &adapter->hw; | 6158 | struct ixgbe_hw *hw = &adapter->hw; |
6159 | |||
6160 | adapter->link_up = false; | ||
6161 | adapter->link_speed = 0; | ||
6162 | |||
6163 | /* only continue if link was up previously */ | ||
6164 | if (!netif_carrier_ok(netdev)) | ||
6165 | return; | ||
6166 | |||
6167 | /* poll for SFP+ cable when link is down */ | ||
6168 | if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) | ||
6169 | adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; | ||
6170 | |||
6171 | e_info(drv, "NIC Link is Down\n"); | ||
6172 | netif_carrier_off(netdev); | ||
6173 | } | ||
6174 | |||
6175 | /** | ||
6176 | * ixgbe_watchdog_flush_tx - flush queues on link down | ||
6177 | * @adapter - pointer to the device adapter structure | ||
6178 | **/ | ||
6179 | static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) | ||
6180 | { | ||
6093 | int i; | 6181 | int i; |
6182 | int some_tx_pending = 0; | ||
6094 | 6183 | ||
6095 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { | 6184 | if (!netif_carrier_ok(adapter->netdev)) { |
6096 | for (i = 0; i < adapter->num_tx_queues; i++) | 6185 | for (i = 0; i < adapter->num_tx_queues; i++) { |
6097 | set_bit(__IXGBE_TX_FDIR_INIT_DONE, | 6186 | struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; |
6098 | &(adapter->tx_ring[i]->state)); | 6187 | if (tx_ring->next_to_use != tx_ring->next_to_clean) { |
6099 | } else { | 6188 | some_tx_pending = 1; |
6100 | e_err(probe, "failed to finish FDIR re-initialization, " | 6189 | break; |
6101 | "ignored adding FDIR ATR filters\n"); | 6190 | } |
6191 | } | ||
6192 | |||
6193 | if (some_tx_pending) { | ||
6194 | /* We've lost link, so the controller stops DMA, | ||
6195 | * but we've got queued Tx work that's never going | ||
6196 | * to get done, so reset controller to flush Tx. | ||
6197 | * (Do the reset outside of interrupt context). | ||
6198 | */ | ||
6199 | adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; | ||
6200 | } | ||
6102 | } | 6201 | } |
6103 | /* Done FDIR Re-initialization, enable transmits */ | ||
6104 | netif_tx_start_all_queues(adapter->netdev); | ||
6105 | } | 6202 | } |
6106 | 6203 | ||
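ixgbe_watchdog_flush_tx() replaces the pending-Tx scan the old watchdog task carried inline: a ring still has unfinished work whenever its producer index (next_to_use) differs from its consumer index (next_to_clean), and with the link down that work can never complete, so the function requests a reset to flush it. The emptiness test itself is just an index comparison; a standalone toy version:

#include <stdio.h>

/* A ring is idle when the producer has been fully consumed. */
struct demo_ring { unsigned int next_to_use, next_to_clean; };

static int demo_tx_pending(const struct demo_ring *r)
{
        return r->next_to_use != r->next_to_clean;
}

int main(void)
{
        struct demo_ring idle  = { .next_to_use = 10, .next_to_clean = 10 };
        struct demo_ring stuck = { .next_to_use = 14, .next_to_clean = 10 };

        printf("idle ring pending: %d\n", demo_tx_pending(&idle));   /* 0 */
        printf("stuck ring pending: %d\n", demo_tx_pending(&stuck)); /* 1 */
        return 0;
}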
6107 | static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) | 6204 | static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) |
@@ -6124,133 +6221,186 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) | |||
6124 | e_warn(drv, "%d Spoofed packets detected\n", ssvpc); | 6221 | e_warn(drv, "%d Spoofed packets detected\n", ssvpc); |
6125 | } | 6222 | } |
6126 | 6223 | ||
6127 | static DEFINE_MUTEX(ixgbe_watchdog_lock); | 6224 | /** |
6225 | * ixgbe_watchdog_subtask - check and bring link up | ||
6226 | * @adapter - pointer to the device adapter structure | ||
6227 | **/ | ||
6228 | static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) | ||
6229 | { | ||
6230 | /* if interface is down do nothing */ | ||
6231 | if (test_bit(__IXGBE_DOWN, &adapter->state)) | ||
6232 | return; | ||
6233 | |||
6234 | ixgbe_watchdog_update_link(adapter); | ||
6235 | |||
6236 | if (adapter->link_up) | ||
6237 | ixgbe_watchdog_link_is_up(adapter); | ||
6238 | else | ||
6239 | ixgbe_watchdog_link_is_down(adapter); | ||
6240 | |||
6241 | ixgbe_spoof_check(adapter); | ||
6242 | ixgbe_update_stats(adapter); | ||
6243 | |||
6244 | ixgbe_watchdog_flush_tx(adapter); | ||
6245 | } | ||
6128 | 6246 | ||
6129 | /** | 6247 | /** |
6130 | * ixgbe_watchdog_task - worker thread to bring link up | 6248 | * ixgbe_sfp_detection_subtask - poll for SFP+ cable |
6131 | * @work: pointer to work_struct containing our data | 6249 | * @adapter - the ixgbe adapter structure |
6132 | **/ | 6250 | **/ |
6133 | static void ixgbe_watchdog_task(struct work_struct *work) | 6251 | static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) |
6134 | { | 6252 | { |
6135 | struct ixgbe_adapter *adapter = container_of(work, | ||
6136 | struct ixgbe_adapter, | ||
6137 | watchdog_task); | ||
6138 | struct net_device *netdev = adapter->netdev; | ||
6139 | struct ixgbe_hw *hw = &adapter->hw; | 6253 | struct ixgbe_hw *hw = &adapter->hw; |
6140 | u32 link_speed; | 6254 | s32 err; |
6141 | bool link_up; | ||
6142 | int i; | ||
6143 | struct ixgbe_ring *tx_ring; | ||
6144 | int some_tx_pending = 0; | ||
6145 | 6255 | ||
6146 | mutex_lock(&ixgbe_watchdog_lock); | 6256 | /* not searching for SFP so there is nothing to do here */ |
6257 | if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && | ||
6258 | !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) | ||
6259 | return; | ||
6147 | 6260 | ||
6148 | link_up = adapter->link_up; | 6261 | /* someone else is in init, wait until next service event */ |
6149 | link_speed = adapter->link_speed; | 6262 | if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) |
6263 | return; | ||
6150 | 6264 | ||
6151 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { | 6265 | err = hw->phy.ops.identify_sfp(hw); |
6152 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); | 6266 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) |
6153 | if (link_up) { | 6267 | goto sfp_out; |
6154 | #ifdef CONFIG_DCB | ||
6155 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
6156 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) | ||
6157 | hw->mac.ops.fc_enable(hw, i); | ||
6158 | } else { | ||
6159 | hw->mac.ops.fc_enable(hw, 0); | ||
6160 | } | ||
6161 | #else | ||
6162 | hw->mac.ops.fc_enable(hw, 0); | ||
6163 | #endif | ||
6164 | } | ||
6165 | 6268 | ||
6166 | if (link_up || | 6269 | if (err == IXGBE_ERR_SFP_NOT_PRESENT) { |
6167 | time_after(jiffies, (adapter->link_check_timeout + | 6270 | /* If no cable is present, then we need to reset |
6168 | IXGBE_TRY_LINK_TIMEOUT))) { | 6271 | * the next time we find a good cable. */ |
6169 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; | 6272 | adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; |
6170 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); | ||
6171 | } | ||
6172 | adapter->link_up = link_up; | ||
6173 | adapter->link_speed = link_speed; | ||
6174 | } | 6273 | } |
6175 | 6274 | ||
6176 | if (link_up) { | 6275 | /* exit on error */ |
6177 | if (!netif_carrier_ok(netdev)) { | 6276 | if (err) |
6178 | bool flow_rx, flow_tx; | 6277 | goto sfp_out; |
6179 | |||
6180 | switch (hw->mac.type) { | ||
6181 | case ixgbe_mac_82598EB: { | ||
6182 | u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | ||
6183 | u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); | ||
6184 | flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); | ||
6185 | flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); | ||
6186 | } | ||
6187 | break; | ||
6188 | case ixgbe_mac_82599EB: | ||
6189 | case ixgbe_mac_X540: { | ||
6190 | u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); | ||
6191 | u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); | ||
6192 | flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); | ||
6193 | flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); | ||
6194 | } | ||
6195 | break; | ||
6196 | default: | ||
6197 | flow_tx = false; | ||
6198 | flow_rx = false; | ||
6199 | break; | ||
6200 | } | ||
6201 | 6278 | ||
6202 | e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", | 6279 | /* exit if reset not needed */ |
6203 | (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? | 6280 | if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) |
6204 | "10 Gbps" : | 6281 | goto sfp_out; |
6205 | (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? | ||
6206 | "1 Gbps" : | ||
6207 | (link_speed == IXGBE_LINK_SPEED_100_FULL ? | ||
6208 | "100 Mbps" : | ||
6209 | "unknown speed"))), | ||
6210 | ((flow_rx && flow_tx) ? "RX/TX" : | ||
6211 | (flow_rx ? "RX" : | ||
6212 | (flow_tx ? "TX" : "None")))); | ||
6213 | |||
6214 | netif_carrier_on(netdev); | ||
6215 | ixgbe_check_vf_rate_limit(adapter); | ||
6216 | } else { | ||
6217 | /* Force detection of hung controller */ | ||
6218 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
6219 | tx_ring = adapter->tx_ring[i]; | ||
6220 | set_check_for_tx_hang(tx_ring); | ||
6221 | } | ||
6222 | } | ||
6223 | } else { | ||
6224 | adapter->link_up = false; | ||
6225 | adapter->link_speed = 0; | ||
6226 | if (netif_carrier_ok(netdev)) { | ||
6227 | e_info(drv, "NIC Link is Down\n"); | ||
6228 | netif_carrier_off(netdev); | ||
6229 | } | ||
6230 | } | ||
6231 | 6282 | ||
6232 | if (!netif_carrier_ok(netdev)) { | 6283 | adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; |
6233 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
6234 | tx_ring = adapter->tx_ring[i]; | ||
6235 | if (tx_ring->next_to_use != tx_ring->next_to_clean) { | ||
6236 | some_tx_pending = 1; | ||
6237 | break; | ||
6238 | } | ||
6239 | } | ||
6240 | 6284 | ||
6241 | if (some_tx_pending) { | 6285 | /* |
6242 | /* We've lost link, so the controller stops DMA, | 6286 | * A module may be identified correctly, but the EEPROM may not have |
6243 | * but we've got queued Tx work that's never going | 6287 | * support for that module. setup_sfp() will fail in that case, so |
6244 | * to get done, so reset controller to flush Tx. | 6288 | * we should not allow that module to load. |
6245 | * (Do the reset outside of interrupt context). | 6289 | */ |
6246 | */ | 6290 | if (hw->mac.type == ixgbe_mac_82598EB) |
6247 | schedule_work(&adapter->reset_task); | 6291 | err = hw->phy.ops.reset(hw); |
6248 | } | 6292 | else |
6293 | err = hw->mac.ops.setup_sfp(hw); | ||
6294 | |||
6295 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) | ||
6296 | goto sfp_out; | ||
6297 | |||
6298 | adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; | ||
6299 | e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); | ||
6300 | |||
6301 | sfp_out: | ||
6302 | clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); | ||
6303 | |||
6304 | if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && | ||
6305 | (adapter->netdev->reg_state == NETREG_REGISTERED)) { | ||
6306 | e_dev_err("failed to initialize because an unsupported " | ||
6307 | "SFP+ module type was detected.\n"); | ||
6308 | e_dev_err("Reload the driver after installing a " | ||
6309 | "supported module.\n"); | ||
6310 | unregister_netdev(adapter->netdev); | ||
6249 | } | 6311 | } |
6312 | } | ||
6250 | 6313 | ||
6251 | ixgbe_spoof_check(adapter); | 6314 | /** |
6252 | ixgbe_update_stats(adapter); | 6315 | * ixgbe_sfp_link_config_subtask - set up link SFP after module install |
6253 | mutex_unlock(&ixgbe_watchdog_lock); | 6316 | * @adapter - the ixgbe adapter structure |
6317 | **/ | ||
6318 | static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) | ||
6319 | { | ||
6320 | struct ixgbe_hw *hw = &adapter->hw; | ||
6321 | u32 autoneg; | ||
6322 | bool negotiation; | ||
6323 | |||
6324 | if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) | ||
6325 | return; | ||
6326 | |||
6327 | /* someone else is in init, wait until next service event */ | ||
6328 | if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) | ||
6329 | return; | ||
6330 | |||
6331 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; | ||
6332 | |||
6333 | autoneg = hw->phy.autoneg_advertised; | ||
6334 | if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) | ||
6335 | hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); | ||
6336 | hw->mac.autotry_restart = false; | ||
6337 | if (hw->mac.ops.setup_link) | ||
6338 | hw->mac.ops.setup_link(hw, autoneg, negotiation, true); | ||
6339 | |||
6340 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; | ||
6341 | adapter->link_check_timeout = jiffies; | ||
6342 | clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); | ||
6343 | } | ||
6344 | |||
6345 | /** | ||
6346 | * ixgbe_service_timer - Timer Call-back | ||
6347 | * @data: pointer to adapter cast into an unsigned long | ||
6348 | **/ | ||
6349 | static void ixgbe_service_timer(unsigned long data) | ||
6350 | { | ||
6351 | struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; | ||
6352 | unsigned long next_event_offset; | ||
6353 | |||
6354 | /* poll faster when waiting for link */ | ||
6355 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) | ||
6356 | next_event_offset = HZ / 10; | ||
6357 | else | ||
6358 | next_event_offset = HZ * 2; | ||
6359 | |||
6360 | /* Reset the timer */ | ||
6361 | mod_timer(&adapter->service_timer, next_event_offset + jiffies); | ||
6362 | |||
6363 | ixgbe_service_event_schedule(adapter); | ||
6364 | } | ||
6365 | |||
6366 | static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) | ||
6367 | { | ||
6368 | if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED)) | ||
6369 | return; | ||
6370 | |||
6371 | adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED; | ||
6372 | |||
6373 | /* If we're already down or resetting, just bail */ | ||
6374 | if (test_bit(__IXGBE_DOWN, &adapter->state) || | ||
6375 | test_bit(__IXGBE_RESETTING, &adapter->state)) | ||
6376 | return; | ||
6377 | |||
6378 | ixgbe_dump(adapter); | ||
6379 | netdev_err(adapter->netdev, "Reset adapter\n"); | ||
6380 | adapter->tx_timeout_count++; | ||
6381 | |||
6382 | ixgbe_reinit_locked(adapter); | ||
6383 | } | ||
6384 | |||
6385 | /** | ||
6386 | * ixgbe_service_task - manages and runs subtasks | ||
6387 | * @work: pointer to work_struct containing our data | ||
6388 | **/ | ||
6389 | static void ixgbe_service_task(struct work_struct *work) | ||
6390 | { | ||
6391 | struct ixgbe_adapter *adapter = container_of(work, | ||
6392 | struct ixgbe_adapter, | ||
6393 | service_task); | ||
6394 | |||
6395 | ixgbe_reset_subtask(adapter); | ||
6396 | ixgbe_sfp_detection_subtask(adapter); | ||
6397 | ixgbe_sfp_link_config_subtask(adapter); | ||
6398 | ixgbe_check_overtemp_subtask(adapter); | ||
6399 | ixgbe_watchdog_subtask(adapter); | ||
6400 | ixgbe_fdir_reinit_subtask(adapter); | ||
6401 | ixgbe_check_hang_subtask(adapter); | ||
6402 | |||
6403 | ixgbe_service_event_complete(adapter); | ||
6254 | } | 6404 | } |
6255 | 6405 | ||
6256 | static int ixgbe_tso(struct ixgbe_adapter *adapter, | 6406 | static int ixgbe_tso(struct ixgbe_adapter *adapter, |
@@ -7089,6 +7239,8 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, | |||
7089 | #ifdef CONFIG_PCI_IOV | 7239 | #ifdef CONFIG_PCI_IOV |
7090 | struct ixgbe_hw *hw = &adapter->hw; | 7240 | struct ixgbe_hw *hw = &adapter->hw; |
7091 | int err; | 7241 | int err; |
7242 | int num_vf_macvlans, i; | ||
7243 | struct vf_macvlans *mv_list; | ||
7092 | 7244 | ||
7093 | if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs) | 7245 | if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs) |
7094 | return; | 7246 | return; |
@@ -7105,6 +7257,26 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, | |||
7105 | e_err(probe, "Failed to enable PCI sriov: %d\n", err); | 7257 | e_err(probe, "Failed to enable PCI sriov: %d\n", err); |
7106 | goto err_novfs; | 7258 | goto err_novfs; |
7107 | } | 7259 | } |
7260 | |||
7261 | num_vf_macvlans = hw->mac.num_rar_entries - | ||
7262 | (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); | ||
7263 | |||
7264 | adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, | ||
7265 | sizeof(struct vf_macvlans), | ||
7266 | GFP_KERNEL); | ||
7267 | if (mv_list) { | ||
7268 | /* Initialize list of VF macvlans */ | ||
7269 | INIT_LIST_HEAD(&adapter->vf_mvs.l); | ||
7270 | for (i = 0; i < num_vf_macvlans; i++) { | ||
7271 | mv_list->vf = -1; | ||
7272 | mv_list->free = true; | ||
7273 | mv_list->rar_entry = hw->mac.num_rar_entries - | ||
7274 | (i + adapter->num_vfs + 1); | ||
7275 | list_add(&mv_list->l, &adapter->vf_mvs.l); | ||
7276 | mv_list++; | ||
7277 | } | ||
7278 | } | ||
7279 | |||
7108 | /* If call to enable VFs succeeded then allocate memory | 7280 | /* If call to enable VFs succeeded then allocate memory |
7109 | * for per VF control structures. | 7281 | * for per VF control structures. |
7110 | */ | 7282 | */ |
@@ -7275,22 +7447,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7275 | hw->phy.mdio.mdio_read = ixgbe_mdio_read; | 7447 | hw->phy.mdio.mdio_read = ixgbe_mdio_read; |
7276 | hw->phy.mdio.mdio_write = ixgbe_mdio_write; | 7448 | hw->phy.mdio.mdio_write = ixgbe_mdio_write; |
7277 | 7449 | ||
7278 | /* set up this timer and work struct before calling get_invariants | ||
7279 | * which might start the timer | ||
7280 | */ | ||
7281 | init_timer(&adapter->sfp_timer); | ||
7282 | adapter->sfp_timer.function = ixgbe_sfp_timer; | ||
7283 | adapter->sfp_timer.data = (unsigned long) adapter; | ||
7284 | |||
7285 | INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task); | ||
7286 | |||
7287 | /* multispeed fiber has its own tasklet, called from GPI SDP1 context */ | ||
7288 | INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task); | ||
7289 | |||
7290 | /* a new SFP+ module arrival, called from GPI SDP2 context */ | ||
7291 | INIT_WORK(&adapter->sfp_config_module_task, | ||
7292 | ixgbe_sfp_config_module_task); | ||
7293 | |||
7294 | ii->get_invariants(hw); | 7450 | ii->get_invariants(hw); |
7295 | 7451 | ||
7296 | /* setup the private structure */ | 7452 | /* setup the private structure */ |
@@ -7324,17 +7480,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7324 | hw->phy.reset_if_overtemp = false; | 7480 | hw->phy.reset_if_overtemp = false; |
7325 | if (err == IXGBE_ERR_SFP_NOT_PRESENT && | 7481 | if (err == IXGBE_ERR_SFP_NOT_PRESENT && |
7326 | hw->mac.type == ixgbe_mac_82598EB) { | 7482 | hw->mac.type == ixgbe_mac_82598EB) { |
7327 | /* | ||
7328 | * Start a kernel thread to watch for a module to arrive. | ||
7329 | * Only do this for 82598, since 82599 will generate | ||
7330 | * interrupts on module arrival. | ||
7331 | */ | ||
7332 | set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | ||
7333 | mod_timer(&adapter->sfp_timer, | ||
7334 | round_jiffies(jiffies + (2 * HZ))); | ||
7335 | err = 0; | 7483 | err = 0; |
7336 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 7484 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
7337 | e_dev_err("failed to initialize because an unsupported SFP+ " | 7485 | e_dev_err("failed to load because an unsupported SFP+ " |
7338 | "module type was detected.\n"); | 7486 | "module type was detected.\n"); |
7339 | e_dev_err("Reload the driver after installing a supported " | 7487 | e_dev_err("Reload the driver after installing a supported " |
7340 | "module.\n"); | 7488 | "module.\n"); |
@@ -7356,9 +7504,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7356 | netdev->features |= NETIF_F_TSO; | 7504 | netdev->features |= NETIF_F_TSO; |
7357 | netdev->features |= NETIF_F_TSO6; | 7505 | netdev->features |= NETIF_F_TSO6; |
7358 | netdev->features |= NETIF_F_GRO; | 7506 | netdev->features |= NETIF_F_GRO; |
7507 | netdev->features |= NETIF_F_RXHASH; | ||
7359 | 7508 | ||
7360 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | 7509 | switch (adapter->hw.mac.type) { |
7510 | case ixgbe_mac_82599EB: | ||
7511 | case ixgbe_mac_X540: | ||
7361 | netdev->features |= NETIF_F_SCTP_CSUM; | 7512 | netdev->features |= NETIF_F_SCTP_CSUM; |
7513 | break; | ||
7514 | default: | ||
7515 | break; | ||
7516 | } | ||
7362 | 7517 | ||
7363 | netdev->vlan_features |= NETIF_F_TSO; | 7518 | netdev->vlan_features |= NETIF_F_TSO; |
7364 | netdev->vlan_features |= NETIF_F_TSO6; | 7519 | netdev->vlan_features |= NETIF_F_TSO6; |
@@ -7419,17 +7574,19 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7419 | (hw->mac.type == ixgbe_mac_82599EB)))) | 7574 | (hw->mac.type == ixgbe_mac_82599EB)))) |
7420 | hw->mac.ops.disable_tx_laser(hw); | 7575 | hw->mac.ops.disable_tx_laser(hw); |
7421 | 7576 | ||
7422 | init_timer(&adapter->watchdog_timer); | 7577 | setup_timer(&adapter->service_timer, &ixgbe_service_timer, |
7423 | adapter->watchdog_timer.function = ixgbe_watchdog; | 7578 | (unsigned long) adapter); |
7424 | adapter->watchdog_timer.data = (unsigned long)adapter; | ||
7425 | 7579 | ||
7426 | INIT_WORK(&adapter->reset_task, ixgbe_reset_task); | 7580 | INIT_WORK(&adapter->service_task, ixgbe_service_task); |
7427 | INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task); | 7581 | clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); |
7428 | 7582 | ||
7429 | err = ixgbe_init_interrupt_scheme(adapter); | 7583 | err = ixgbe_init_interrupt_scheme(adapter); |
7430 | if (err) | 7584 | if (err) |
7431 | goto err_sw_init; | 7585 | goto err_sw_init; |
7432 | 7586 | ||
7587 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) | ||
7588 | netdev->features &= ~NETIF_F_RXHASH; | ||
7589 | |||
7433 | switch (pdev->device) { | 7590 | switch (pdev->device) { |
7434 | case IXGBE_DEV_ID_82599_SFP: | 7591 | case IXGBE_DEV_ID_82599_SFP: |
7435 | /* Only this subdevice supports WOL */ | 7592 | /* Only this subdevice supports WOL */ |
@@ -7458,8 +7615,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7458 | 7615 | ||
7459 | /* print bus type/speed/width info */ | 7616 | /* print bus type/speed/width info */ |
7460 | e_dev_info("(PCI Express:%s:%s) %pM\n", | 7617 | e_dev_info("(PCI Express:%s:%s) %pM\n", |
7461 | (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" : | 7618 | (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" : |
7462 | hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" : | 7619 | hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" : |
7463 | "Unknown"), | 7620 | "Unknown"), |
7464 | (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" : | 7621 | (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" : |
7465 | hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" : | 7622 | hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" : |
@@ -7508,13 +7665,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7508 | /* carrier off reporting is important to ethtool even BEFORE open */ | 7665 | /* carrier off reporting is important to ethtool even BEFORE open */ |
7509 | netif_carrier_off(netdev); | 7666 | netif_carrier_off(netdev); |
7510 | 7667 | ||
7511 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
7512 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | ||
7513 | INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); | ||
7514 | |||
7515 | if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) | ||
7516 | INIT_WORK(&adapter->check_overtemp_task, | ||
7517 | ixgbe_check_overtemp_task); | ||
7518 | #ifdef CONFIG_IXGBE_DCA | 7668 | #ifdef CONFIG_IXGBE_DCA |
7519 | if (dca_add_requester(&pdev->dev) == 0) { | 7669 | if (dca_add_requester(&pdev->dev) == 0) { |
7520 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; | 7670 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; |
@@ -7541,11 +7691,7 @@ err_sw_init: | |||
7541 | err_eeprom: | 7691 | err_eeprom: |
7542 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | 7692 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
7543 | ixgbe_disable_sriov(adapter); | 7693 | ixgbe_disable_sriov(adapter); |
7544 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | 7694 | adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; |
7545 | del_timer_sync(&adapter->sfp_timer); | ||
7546 | cancel_work_sync(&adapter->sfp_task); | ||
7547 | cancel_work_sync(&adapter->multispeed_fiber_task); | ||
7548 | cancel_work_sync(&adapter->sfp_config_module_task); | ||
7549 | iounmap(hw->hw_addr); | 7695 | iounmap(hw->hw_addr); |
7550 | err_ioremap: | 7696 | err_ioremap: |
7551 | free_netdev(netdev); | 7697 | free_netdev(netdev); |
@@ -7573,24 +7719,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
7573 | struct net_device *netdev = adapter->netdev; | 7719 | struct net_device *netdev = adapter->netdev; |
7574 | 7720 | ||
7575 | set_bit(__IXGBE_DOWN, &adapter->state); | 7721 | set_bit(__IXGBE_DOWN, &adapter->state); |
7576 | 7722 | cancel_work_sync(&adapter->service_task); | |
7577 | /* | ||
7578 | * The timers may be rescheduled, so explicitly disable them | ||
7579 | * from being rescheduled. | ||
7580 | */ | ||
7581 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | ||
7582 | del_timer_sync(&adapter->watchdog_timer); | ||
7583 | del_timer_sync(&adapter->sfp_timer); | ||
7584 | |||
7585 | cancel_work_sync(&adapter->watchdog_task); | ||
7586 | cancel_work_sync(&adapter->sfp_task); | ||
7587 | cancel_work_sync(&adapter->multispeed_fiber_task); | ||
7588 | cancel_work_sync(&adapter->sfp_config_module_task); | ||
7589 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
7590 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | ||
7591 | cancel_work_sync(&adapter->fdir_reinit_task); | ||
7592 | if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) | ||
7593 | cancel_work_sync(&adapter->check_overtemp_task); | ||
7594 | 7723 | ||
7595 | #ifdef CONFIG_IXGBE_DCA | 7724 | #ifdef CONFIG_IXGBE_DCA |
7596 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { | 7725 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { |
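The probe hunk above reserves the tail of the RAR (receive address) table for VF macvlan filters: whatever is left after the PF macvlans, the PF's own address and one primary address per VF becomes a free list of filter slots. A minimal userspace model of that sizing and free-list initialization is sketched below; the constants and arithmetic are taken from the hunk, the num_rar_entries/num_vfs inputs are example values, and the kernel's struct list_head is simplified to a plain array.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    #define IXGBE_MAX_PF_MACVLANS 15   /* constant used elsewhere in this series */

    struct vf_macvlan_model {
        int vf;              /* -1 while the entry is unused */
        bool free;
        int rar_entry;       /* hardware RAR index backing this filter */
    };

    int main(void)
    {
        unsigned int num_rar_entries = 128;  /* example: X540 RAR table size */
        unsigned int num_vfs = 8;            /* example max_vfs setting */
        unsigned int num_vf_macvlans, i;
        struct vf_macvlan_model *mv_list;

        /* Same arithmetic as the probe code: RAR entries minus the PF
         * macvlans, the PF's own MAC and one primary MAC per VF. */
        num_vf_macvlans = num_rar_entries -
                          (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);

        mv_list = calloc(num_vf_macvlans, sizeof(*mv_list));
        if (!mv_list)
            return 1;        /* the driver simply skips the feature */

        for (i = 0; i < num_vf_macvlans; i++) {
            mv_list[i].vf = -1;
            mv_list[i].free = true;
            /* entries are handed out from the top of the RAR table down */
            mv_list[i].rar_entry = num_rar_entries - (i + num_vfs + 1);
        }

        printf("%u macvlan slots, RAR %d..%d\n", num_vf_macvlans,
               mv_list[num_vf_macvlans - 1].rar_entry, mv_list[0].rar_entry);
        free(mv_list);
        return 0;
    }

With the example inputs this yields 104 slots spanning RAR entries 16 through 119, which is the pool the sriov code later hands out to VFs.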
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h index fe6ea81dc7f8..b239bdac38da 100644 --- a/drivers/net/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ixgbe/ixgbe_mbx.h | |||
@@ -36,9 +36,6 @@ | |||
36 | #define IXGBE_VFMAILBOX 0x002FC | 36 | #define IXGBE_VFMAILBOX 0x002FC |
37 | #define IXGBE_VFMBMEM 0x00200 | 37 | #define IXGBE_VFMBMEM 0x00200 |
38 | 38 | ||
39 | #define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x)) | ||
40 | #define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn)) | ||
41 | |||
42 | #define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ | 39 | #define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ |
43 | #define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ | 40 | #define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ |
44 | #define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ | 41 | #define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ |
@@ -70,6 +67,7 @@ | |||
70 | #define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ | 67 | #define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ |
71 | #define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ | 68 | #define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ |
72 | #define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ | 69 | #define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ |
70 | #define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ | ||
73 | 71 | ||
74 | /* length of permanent address message returned from PF */ | 72 | /* length of permanent address message returned from PF */ |
75 | #define IXGBE_VF_PERMADDR_MSG_LEN 4 | 73 | #define IXGBE_VF_PERMADDR_MSG_LEN 4 |
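The new IXGBE_VF_SET_MACVLAN opcode lets a VF ask the PF to program an extra unicast filter; the filter index travels in the message-info bits of the first mailbox word and the MAC address follows in the next words. The sketch below shows how such a request word could be packed and unpacked. Only the opcode value comes from this hunk; the msg-info shift and mask are assumptions for illustration (the driver uses IXGBE_VT_MSGINFO_SHIFT/IXGBE_VT_MSGINFO_MASK, whose real values live in this header family).

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define IXGBE_VF_SET_MACVLAN  0x06             /* from this hunk */
    /* Assumed layout of the message-info field, for illustration only. */
    #define VT_MSGINFO_SHIFT      16
    #define VT_MSGINFO_MASK       (0xFFu << VT_MSGINFO_SHIFT)

    int main(void)
    {
        const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        uint32_t msgbuf[3] = { 0 };
        unsigned int index = 1;    /* index 0 means "clear my unicast list" */

        /* VF side: opcode in the low bits, filter index in the msg-info
         * bits, MAC address packed into the following words. */
        msgbuf[0] = IXGBE_VF_SET_MACVLAN | (index << VT_MSGINFO_SHIFT);
        memcpy(&msgbuf[1], mac, sizeof(mac));

        /* PF side: recover the index the way ixgbe_rcv_msg_from_vf() does. */
        unsigned int rx_index = (msgbuf[0] & VT_MSGINFO_MASK) >> VT_MSGINFO_SHIFT;
        printf("opcode=0x%02x index=%u\n",
               (unsigned int)(msgbuf[0] & 0xFF), rx_index);
        return 0;
    }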
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index df5b8aa4795d..735f686c3b36 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c | |||
@@ -449,7 +449,8 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) | |||
449 | MDIO_MMD_AN, | 449 | MDIO_MMD_AN, |
450 | &autoneg_reg); | 450 | &autoneg_reg); |
451 | 451 | ||
452 | autoneg_reg &= ~ADVERTISE_100FULL; | 452 | autoneg_reg &= ~(ADVERTISE_100FULL | |
453 | ADVERTISE_100HALF); | ||
453 | if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) | 454 | if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) |
454 | autoneg_reg |= ADVERTISE_100FULL; | 455 | autoneg_reg |= ADVERTISE_100FULL; |
455 | 456 | ||
@@ -656,7 +657,8 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) | |||
656 | MDIO_MMD_AN, | 657 | MDIO_MMD_AN, |
657 | &autoneg_reg); | 658 | &autoneg_reg); |
658 | 659 | ||
659 | autoneg_reg &= ~ADVERTISE_100FULL; | 660 | autoneg_reg &= ~(ADVERTISE_100FULL | |
661 | ADVERTISE_100HALF); | ||
660 | if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) | 662 | if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) |
661 | autoneg_reg |= ADVERTISE_100FULL; | 663 | autoneg_reg |= ADVERTISE_100FULL; |
662 | 664 | ||
@@ -753,7 +755,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) | |||
753 | &phy_data); | 755 | &phy_data); |
754 | if ((phy_data & MDIO_CTRL1_RESET) == 0) | 756 | if ((phy_data & MDIO_CTRL1_RESET) == 0) |
755 | break; | 757 | break; |
756 | msleep(10); | 758 | usleep_range(10000, 20000); |
757 | } | 759 | } |
758 | 760 | ||
759 | if ((phy_data & MDIO_CTRL1_RESET) != 0) { | 761 | if ((phy_data & MDIO_CTRL1_RESET) != 0) { |
@@ -782,7 +784,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) | |||
782 | case IXGBE_DELAY_NL: | 784 | case IXGBE_DELAY_NL: |
783 | data_offset++; | 785 | data_offset++; |
784 | hw_dbg(hw, "DELAY: %d MS\n", edata); | 786 | hw_dbg(hw, "DELAY: %d MS\n", edata); |
785 | msleep(edata); | 787 | usleep_range(edata * 1000, edata * 2000); |
786 | break; | 788 | break; |
787 | case IXGBE_DATA_NL: | 789 | case IXGBE_DATA_NL: |
788 | hw_dbg(hw, "DATA:\n"); | 790 | hw_dbg(hw, "DATA:\n"); |
@@ -1220,7 +1222,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, | |||
1220 | swfw_mask = IXGBE_GSSR_PHY0_SM; | 1222 | swfw_mask = IXGBE_GSSR_PHY0_SM; |
1221 | 1223 | ||
1222 | do { | 1224 | do { |
1223 | if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) { | 1225 | if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) { |
1224 | status = IXGBE_ERR_SWFW_SYNC; | 1226 | status = IXGBE_ERR_SWFW_SYNC; |
1225 | goto read_byte_out; | 1227 | goto read_byte_out; |
1226 | } | 1228 | } |
@@ -1267,7 +1269,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, | |||
1267 | break; | 1269 | break; |
1268 | 1270 | ||
1269 | fail: | 1271 | fail: |
1270 | ixgbe_release_swfw_sync(hw, swfw_mask); | 1272 | hw->mac.ops.release_swfw_sync(hw, swfw_mask); |
1271 | msleep(100); | 1273 | msleep(100); |
1272 | ixgbe_i2c_bus_clear(hw); | 1274 | ixgbe_i2c_bus_clear(hw); |
1273 | retry++; | 1275 | retry++; |
@@ -1278,7 +1280,7 @@ fail: | |||
1278 | 1280 | ||
1279 | } while (retry < max_retry); | 1281 | } while (retry < max_retry); |
1280 | 1282 | ||
1281 | ixgbe_release_swfw_sync(hw, swfw_mask); | 1283 | hw->mac.ops.release_swfw_sync(hw, swfw_mask); |
1282 | 1284 | ||
1283 | read_byte_out: | 1285 | read_byte_out: |
1284 | return status; | 1286 | return status; |
@@ -1306,7 +1308,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, | |||
1306 | else | 1308 | else |
1307 | swfw_mask = IXGBE_GSSR_PHY0_SM; | 1309 | swfw_mask = IXGBE_GSSR_PHY0_SM; |
1308 | 1310 | ||
1309 | if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) { | 1311 | if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) { |
1310 | status = IXGBE_ERR_SWFW_SYNC; | 1312 | status = IXGBE_ERR_SWFW_SYNC; |
1311 | goto write_byte_out; | 1313 | goto write_byte_out; |
1312 | } | 1314 | } |
@@ -1350,7 +1352,7 @@ fail: | |||
1350 | hw_dbg(hw, "I2C byte write error.\n"); | 1352 | hw_dbg(hw, "I2C byte write error.\n"); |
1351 | } while (retry < max_retry); | 1353 | } while (retry < max_retry); |
1352 | 1354 | ||
1353 | ixgbe_release_swfw_sync(hw, swfw_mask); | 1355 | hw->mac.ops.release_swfw_sync(hw, swfw_mask); |
1354 | 1356 | ||
1355 | write_byte_out: | 1357 | write_byte_out: |
1356 | return status; | 1358 | return status; |
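The two sleep changes in ixgbe_reset_phy_nl() follow the usual kernel guidance: msleep() is imprecise for delays in the 1-20 ms range, so short waits are better expressed with usleep_range(), which gives the scheduler a window instead of a jiffy-rounded fixed sleep. The conversion used in the hunk is simply milliseconds scaled to a microsecond window; an illustrative helper (not part of the driver) that mirrors it:

    #include <stdio.h>

    /* Mirrors the msleep(ms) -> usleep_range(min, max) conversion above:
     * minimum = ms in microseconds, maximum = twice that for slack. */
    static void ms_to_usleep_range(unsigned int ms,
                                   unsigned long *min_us, unsigned long *max_us)
    {
        *min_us = ms * 1000UL;
        *max_us = ms * 2000UL;
    }

    int main(void)
    {
        unsigned long lo, hi;

        ms_to_usleep_range(10, &lo, &hi);   /* the old msleep(10) */
        printf("usleep_range(%lu, %lu)\n", lo, hi);
        return 0;
    }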
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index 6e50d8328942..ac99b0458fe2 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c | |||
@@ -82,6 +82,21 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, | |||
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
84 | 84 | ||
85 | static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter) | ||
86 | { | ||
87 | struct ixgbe_hw *hw = &adapter->hw; | ||
88 | struct list_head *pos; | ||
89 | struct vf_macvlans *entry; | ||
90 | |||
91 | list_for_each(pos, &adapter->vf_mvs.l) { | ||
92 | entry = list_entry(pos, struct vf_macvlans, l); | ||
93 | if (entry->free == false) | ||
94 | hw->mac.ops.set_rar(hw, entry->rar_entry, | ||
95 | entry->vf_macvlan, | ||
96 | entry->vf, IXGBE_RAH_AV); | ||
97 | } | ||
98 | } | ||
99 | |||
85 | void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) | 100 | void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) |
86 | { | 101 | { |
87 | struct ixgbe_hw *hw = &adapter->hw; | 102 | struct ixgbe_hw *hw = &adapter->hw; |
@@ -102,6 +117,9 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) | |||
102 | IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); | 117 | IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); |
103 | } | 118 | } |
104 | } | 119 | } |
120 | |||
121 | /* Restore any VF macvlans */ | ||
122 | ixgbe_restore_vf_macvlans(adapter); | ||
105 | } | 123 | } |
106 | 124 | ||
107 | static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, | 125 | static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, |
@@ -110,7 +128,7 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, | |||
110 | return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); | 128 | return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); |
111 | } | 129 | } |
112 | 130 | ||
113 | void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf) | 131 | static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf) |
114 | { | 132 | { |
115 | struct ixgbe_hw *hw = &adapter->hw; | 133 | struct ixgbe_hw *hw = &adapter->hw; |
116 | int new_mtu = msgbuf[1]; | 134 | int new_mtu = msgbuf[1]; |
@@ -200,6 +218,61 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, | |||
200 | return 0; | 218 | return 0; |
201 | } | 219 | } |
202 | 220 | ||
221 | static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, | ||
222 | int vf, int index, unsigned char *mac_addr) | ||
223 | { | ||
224 | struct ixgbe_hw *hw = &adapter->hw; | ||
225 | struct list_head *pos; | ||
226 | struct vf_macvlans *entry; | ||
227 | |||
228 | if (index <= 1) { | ||
229 | list_for_each(pos, &adapter->vf_mvs.l) { | ||
230 | entry = list_entry(pos, struct vf_macvlans, l); | ||
231 | if (entry->vf == vf) { | ||
232 | entry->vf = -1; | ||
233 | entry->free = true; | ||
234 | entry->is_macvlan = false; | ||
235 | hw->mac.ops.clear_rar(hw, entry->rar_entry); | ||
236 | } | ||
237 | } | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * If index was zero then we were asked to clear the uc list | ||
242 | * for the VF. We're done. | ||
243 | */ | ||
244 | if (!index) | ||
245 | return 0; | ||
246 | |||
247 | entry = NULL; | ||
248 | |||
249 | list_for_each(pos, &adapter->vf_mvs.l) { | ||
250 | entry = list_entry(pos, struct vf_macvlans, l); | ||
251 | if (entry->free) | ||
252 | break; | ||
253 | } | ||
254 | |||
255 | /* | ||
256 | * If we traversed the entire list and didn't find a free entry | ||
257 | * then we're out of space on the RAR table. Also entry may | ||
258 | * be NULL because the original memory allocation for the list | ||
259 | * failed, which is not fatal but does mean we can't support | ||
260 | * VF requests for MACVLAN because we couldn't allocate | ||
261 | * memory for the list management required. | ||
262 | */ | ||
263 | if (!entry || !entry->free) | ||
264 | return -ENOSPC; | ||
265 | |||
266 | entry->free = false; | ||
267 | entry->is_macvlan = true; | ||
268 | entry->vf = vf; | ||
269 | memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); | ||
270 | |||
271 | hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV); | ||
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
203 | int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) | 276 | int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) |
204 | { | 277 | { |
205 | unsigned char vf_mac_addr[6]; | 278 | unsigned char vf_mac_addr[6]; |
@@ -251,12 +324,12 @@ static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) | |||
251 | static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) | 324 | static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) |
252 | { | 325 | { |
253 | u32 mbx_size = IXGBE_VFMAILBOX_SIZE; | 326 | u32 mbx_size = IXGBE_VFMAILBOX_SIZE; |
254 | u32 msgbuf[mbx_size]; | 327 | u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; |
255 | struct ixgbe_hw *hw = &adapter->hw; | 328 | struct ixgbe_hw *hw = &adapter->hw; |
256 | s32 retval; | 329 | s32 retval; |
257 | int entries; | 330 | int entries; |
258 | u16 *hash_list; | 331 | u16 *hash_list; |
259 | int add, vid; | 332 | int add, vid, index; |
260 | u8 *new_mac; | 333 | u8 *new_mac; |
261 | 334 | ||
262 | retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); | 335 | retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); |
@@ -345,6 +418,24 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) | |||
345 | retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); | 418 | retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); |
346 | } | 419 | } |
347 | break; | 420 | break; |
421 | case IXGBE_VF_SET_MACVLAN: | ||
422 | index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> | ||
423 | IXGBE_VT_MSGINFO_SHIFT; | ||
424 | /* | ||
425 | * If the VF is allowed to set MAC filters then turn off | ||
426 | * anti-spoofing to avoid false positives. An index | ||
427 | * greater than 0 will indicate the VF is setting a | ||
428 | * macvlan MAC filter. | ||
429 | */ | ||
430 | if (index > 0 && adapter->antispoofing_enabled) { | ||
431 | hw->mac.ops.set_mac_anti_spoofing(hw, false, | ||
432 | adapter->num_vfs); | ||
433 | hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); | ||
434 | adapter->antispoofing_enabled = false; | ||
435 | } | ||
436 | retval = ixgbe_set_vf_macvlan(adapter, vf, index, | ||
437 | (unsigned char *)(&msgbuf[1])); | ||
438 | break; | ||
348 | default: | 439 | default: |
349 | e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); | 440 | e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); |
350 | retval = IXGBE_ERR_MBX; | 441 | retval = IXGBE_ERR_MBX; |
@@ -452,7 +543,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) | |||
452 | goto out; | 543 | goto out; |
453 | ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); | 544 | ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); |
454 | ixgbe_set_vmolr(hw, vf, false); | 545 | ixgbe_set_vmolr(hw, vf, false); |
455 | hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); | 546 | if (adapter->antispoofing_enabled) |
547 | hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); | ||
456 | adapter->vfinfo[vf].pf_vlan = vlan; | 548 | adapter->vfinfo[vf].pf_vlan = vlan; |
457 | adapter->vfinfo[vf].pf_qos = qos; | 549 | adapter->vfinfo[vf].pf_qos = qos; |
458 | dev_info(&adapter->pdev->dev, | 550 | dev_info(&adapter->pdev->dev, |
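The allocator in ixgbe_set_vf_macvlan() above is a plain first-fit scan over the pre-built free list: for index <= 1 it releases any entries the VF already owns, an index of zero means clear-only, and otherwise it claims the first free slot or fails with -ENOSPC once the RAR pool is exhausted. A compact userspace model of that claim/release cycle, with the list replaced by an array and a hypothetical helper name, is:

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>
    #include <errno.h>

    #define NUM_SLOTS 8          /* stand-in for the VF macvlan free list */

    struct slot {
        int vf;
        bool free;
        unsigned char mac[6];
    };

    static struct slot slots[NUM_SLOTS];

    /* index == 0 clears the VF's filters, index > 0 adds one, as in the hunk */
    static int set_vf_macvlan_model(int vf, int index, const unsigned char *mac)
    {
        int i, pick = -1;

        if (index <= 1)
            for (i = 0; i < NUM_SLOTS; i++)
                if (slots[i].vf == vf) {
                    slots[i].vf = -1;
                    slots[i].free = true;
                }
        if (!index)
            return 0;

        for (i = 0; i < NUM_SLOTS; i++)
            if (slots[i].free) {
                pick = i;
                break;
            }
        if (pick < 0)
            return -ENOSPC;      /* RAR pool exhausted (or never allocated) */

        slots[pick].free = false;
        slots[pick].vf = vf;
        memcpy(slots[pick].mac, mac, 6);
        return 0;
    }

    int main(void)
    {
        const unsigned char mac[6] = { 2, 0, 0, 0, 0, 1 };
        int i;

        for (i = 0; i < NUM_SLOTS; i++) {
            slots[i].vf = -1;
            slots[i].free = true;
        }
        printf("add: %d\n", set_vf_macvlan_model(3, 1, mac));
        printf("clear: %d\n", set_vf_macvlan_model(3, 0, mac));
        return 0;
    }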
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 25c1fb7eda06..fa43f2507f43 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -58,9 +58,11 @@ | |||
58 | #define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 | 58 | #define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 |
59 | #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 | 59 | #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 |
60 | #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 | 60 | #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 |
61 | #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D | ||
61 | #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC | 62 | #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC |
62 | #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 | 63 | #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 |
63 | #define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C | 64 | #define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C |
65 | #define IXGBE_DEV_ID_82599_LS 0x154F | ||
64 | #define IXGBE_DEV_ID_X540T 0x1528 | 66 | #define IXGBE_DEV_ID_X540T 0x1528 |
65 | 67 | ||
66 | /* General Registers */ | 68 | /* General Registers */ |
@@ -163,6 +165,9 @@ | |||
163 | (0x0D018 + ((_i - 64) * 0x40))) | 165 | (0x0D018 + ((_i - 64) * 0x40))) |
164 | #define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ | 166 | #define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ |
165 | (0x0D028 + ((_i - 64) * 0x40))) | 167 | (0x0D028 + ((_i - 64) * 0x40))) |
168 | #define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ | ||
169 | (0x0D02C + ((_i - 64) * 0x40))) | ||
170 | #define IXGBE_RSCDBU 0x03028 | ||
166 | #define IXGBE_RDDCC 0x02F20 | 171 | #define IXGBE_RDDCC 0x02F20 |
167 | #define IXGBE_RXMEMWRAP 0x03190 | 172 | #define IXGBE_RXMEMWRAP 0x03190 |
168 | #define IXGBE_STARCTRL 0x03024 | 173 | #define IXGBE_STARCTRL 0x03024 |
@@ -227,17 +232,23 @@ | |||
227 | #define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ | 232 | #define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ |
228 | #define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ | 233 | #define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ |
229 | #define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ | 234 | #define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ |
230 | #define IXGBE_VT_CTL 0x051B0 | 235 | #define IXGBE_VT_CTL 0x051B0 |
231 | #define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) | 236 | #define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ |
232 | #define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) | 237 | #define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */ |
233 | #define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) | 238 | #define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ |
234 | #define IXGBE_QDE 0x2F04 | 239 | #define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ |
235 | #define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ | 240 | #define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) |
236 | #define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) | 241 | #define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) |
237 | #define IXGBE_VMRCTL(_i) (0x0F600 + ((_i) * 4)) | 242 | #define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) |
238 | #define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) | 243 | #define IXGBE_QDE 0x2F04 |
239 | #define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) | 244 | #define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */ |
240 | #define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ | 245 | #define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ |
246 | #define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) | ||
247 | #define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) | ||
248 | #define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) | ||
249 | #define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) | ||
250 | #define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ | ||
251 | #define IXGBE_RXFECCERR0 0x051B8 | ||
241 | #define IXGBE_LLITHRESH 0x0EC90 | 252 | #define IXGBE_LLITHRESH 0x0EC90 |
242 | #define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ | 253 | #define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ |
243 | #define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ | 254 | #define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ |
@@ -364,7 +375,7 @@ | |||
364 | #define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ | 375 | #define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ |
365 | #define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ | 376 | #define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ |
366 | #define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */ | 377 | #define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */ |
367 | #define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all 6 wakeup filters*/ | 378 | #define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */ |
368 | #define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ | 379 | #define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ |
369 | 380 | ||
370 | /* Wake Up Status */ | 381 | /* Wake Up Status */ |
@@ -406,7 +417,6 @@ | |||
406 | #define IXGBE_SECTXSTAT 0x08804 | 417 | #define IXGBE_SECTXSTAT 0x08804 |
407 | #define IXGBE_SECTXBUFFAF 0x08808 | 418 | #define IXGBE_SECTXBUFFAF 0x08808 |
408 | #define IXGBE_SECTXMINIFG 0x08810 | 419 | #define IXGBE_SECTXMINIFG 0x08810 |
409 | #define IXGBE_SECTXSTAT 0x08804 | ||
410 | #define IXGBE_SECRXCTRL 0x08D00 | 420 | #define IXGBE_SECRXCTRL 0x08D00 |
411 | #define IXGBE_SECRXSTAT 0x08D04 | 421 | #define IXGBE_SECRXSTAT 0x08D04 |
412 | 422 | ||
@@ -499,21 +509,6 @@ | |||
499 | 509 | ||
500 | #define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 | 510 | #define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 |
501 | 511 | ||
502 | /* HW RSC registers */ | ||
503 | #define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ | ||
504 | (0x0D02C + ((_i - 64) * 0x40))) | ||
505 | #define IXGBE_RSCDBU 0x03028 | ||
506 | #define IXGBE_RSCCTL_RSCEN 0x01 | ||
507 | #define IXGBE_RSCCTL_MAXDESC_1 0x00 | ||
508 | #define IXGBE_RSCCTL_MAXDESC_4 0x04 | ||
509 | #define IXGBE_RSCCTL_MAXDESC_8 0x08 | ||
510 | #define IXGBE_RSCCTL_MAXDESC_16 0x0C | ||
511 | #define IXGBE_RXDADV_RSCCNT_SHIFT 17 | ||
512 | #define IXGBE_GPIE_RSC_DELAY_SHIFT 11 | ||
513 | #define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 | ||
514 | #define IXGBE_RSCDBU_RSCACKDIS 0x00000080 | ||
515 | #define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 | ||
516 | |||
517 | /* DCB registers */ | 512 | /* DCB registers */ |
518 | #define IXGBE_RTRPCS 0x02430 | 513 | #define IXGBE_RTRPCS 0x02430 |
519 | #define IXGBE_RTTDCS 0x04900 | 514 | #define IXGBE_RTTDCS 0x04900 |
@@ -522,6 +517,7 @@ | |||
522 | #define IXGBE_RTRUP2TC 0x03020 | 517 | #define IXGBE_RTRUP2TC 0x03020 |
523 | #define IXGBE_RTTUP2TC 0x0C800 | 518 | #define IXGBE_RTTUP2TC 0x0C800 |
524 | #define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ | 519 | #define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ |
520 | #define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ | ||
525 | #define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ | 521 | #define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ |
526 | #define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ | 522 | #define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ |
527 | #define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ | 523 | #define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ |
@@ -540,7 +536,7 @@ | |||
540 | (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) | 536 | (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) |
541 | 537 | ||
542 | 538 | ||
543 | /* FCoE registers */ | 539 | /* FCoE DMA Context Registers */ |
544 | #define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ | 540 | #define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ |
545 | #define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */ | 541 | #define IXGBE_FCPTRH 0x02414 /* FC User Desc. PTR High */
546 | #define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ | 542 | #define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ |
@@ -677,6 +673,10 @@ | |||
677 | #define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ | 673 | #define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ |
678 | #define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ | 674 | #define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ |
679 | #define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ | 675 | #define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ |
676 | #define IXGBE_O2BGPTC 0x041C4 | ||
677 | #define IXGBE_O2BSPC 0x087B0 | ||
678 | #define IXGBE_B2OSPC 0x041C0 | ||
679 | #define IXGBE_B2OGPRC 0x02F90 | ||
680 | #define IXGBE_PCRC8ECL 0x0E810 | 680 | #define IXGBE_PCRC8ECL 0x0E810 |
681 | #define IXGBE_PCRC8ECH 0x0E811 | 681 | #define IXGBE_PCRC8ECH 0x0E811 |
682 | #define IXGBE_PCRC8ECH_MASK 0x1F | 682 | #define IXGBE_PCRC8ECH_MASK 0x1F |
@@ -742,17 +742,10 @@ | |||
742 | #define IXGBE_PBACLR_82599 0x11068 | 742 | #define IXGBE_PBACLR_82599 0x11068 |
743 | #define IXGBE_CIAA_82599 0x11088 | 743 | #define IXGBE_CIAA_82599 0x11088 |
744 | #define IXGBE_CIAD_82599 0x1108C | 744 | #define IXGBE_CIAD_82599 0x1108C |
745 | #define IXGBE_PCIE_DIAG_0_82599 0x11090 | 745 | #define IXGBE_PICAUSE 0x110B0 |
746 | #define IXGBE_PCIE_DIAG_1_82599 0x11094 | 746 | #define IXGBE_PIENA 0x110B8 |
747 | #define IXGBE_PCIE_DIAG_2_82599 0x11098 | ||
748 | #define IXGBE_PCIE_DIAG_3_82599 0x1109C | ||
749 | #define IXGBE_PCIE_DIAG_4_82599 0x110A0 | ||
750 | #define IXGBE_PCIE_DIAG_5_82599 0x110A4 | ||
751 | #define IXGBE_PCIE_DIAG_6_82599 0x110A8 | ||
752 | #define IXGBE_PCIE_DIAG_7_82599 0x110C0 | ||
753 | #define IXGBE_INTRPT_CSR_82599 0x110B0 | ||
754 | #define IXGBE_INTRPT_MASK_82599 0x110B8 | ||
755 | #define IXGBE_CDQ_MBR_82599 0x110B4 | 747 | #define IXGBE_CDQ_MBR_82599 0x110B4 |
748 | #define IXGBE_PCIESPARE 0x110BC | ||
756 | #define IXGBE_MISC_REG_82599 0x110F0 | 749 | #define IXGBE_MISC_REG_82599 0x110F0 |
757 | #define IXGBE_ECC_CTRL_0_82599 0x11100 | 750 | #define IXGBE_ECC_CTRL_0_82599 0x11100 |
758 | #define IXGBE_ECC_CTRL_1_82599 0x11104 | 751 | #define IXGBE_ECC_CTRL_1_82599 0x11104 |
@@ -785,7 +778,19 @@ | |||
785 | #define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ | 778 | #define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ |
786 | #define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ | 779 | #define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ |
787 | #define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ | 780 | #define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ |
788 | #define IXGBE_RXUDP 0x08C1C /* Time Sync Rx UDP Port - RW */ | 781 | #define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ |
782 | #define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ | ||
783 | #define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ | ||
784 | #define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ | ||
785 | #define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ | ||
786 | #define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ | ||
787 | #define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ | ||
788 | #define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ | ||
789 | #define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ | ||
790 | #define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ | ||
791 | #define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ | ||
792 | #define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ | ||
793 | #define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ | ||
789 | 794 | ||
790 | /* Diagnostic Registers */ | 795 | /* Diagnostic Registers */ |
791 | #define IXGBE_RDSTATCTL 0x02C20 | 796 | #define IXGBE_RDSTATCTL 0x02C20 |
@@ -829,8 +834,20 @@ | |||
829 | #define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ | 834 | #define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ |
830 | #define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ | 835 | #define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ |
831 | #define IXGBE_PCIEECCCTL 0x1106C | 836 | #define IXGBE_PCIEECCCTL 0x1106C |
837 | #define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ | ||
838 | #define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ | ||
839 | #define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ | ||
840 | #define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-316C*/ ||
841 | #define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ | ||
842 | #define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ | ||
843 | #define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ | ||
844 | #define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C16C*/ ||
832 | #define IXGBE_PCIEECCCTL0 0x11100 | 845 | #define IXGBE_PCIEECCCTL0 0x11100 |
833 | #define IXGBE_PCIEECCCTL1 0x11104 | 846 | #define IXGBE_PCIEECCCTL1 0x11104 |
847 | #define IXGBE_RXDBUECC 0x03F70 | ||
848 | #define IXGBE_TXDBUECC 0x0CF70 | ||
849 | #define IXGBE_RXDBUEST 0x03F74 | ||
850 | #define IXGBE_TXDBUEST 0x0CF74 | ||
834 | #define IXGBE_PBTXECC 0x0C300 | 851 | #define IXGBE_PBTXECC 0x0C300 |
835 | #define IXGBE_PBRXECC 0x03300 | 852 | #define IXGBE_PBRXECC 0x03300 |
836 | #define IXGBE_GHECCR 0x110B0 | 853 | #define IXGBE_GHECCR 0x110B0 |
@@ -871,6 +888,7 @@ | |||
871 | #define IXGBE_AUTOC3 0x042AC | 888 | #define IXGBE_AUTOC3 0x042AC |
872 | #define IXGBE_ANLP1 0x042B0 | 889 | #define IXGBE_ANLP1 0x042B0 |
873 | #define IXGBE_ANLP2 0x042B4 | 890 | #define IXGBE_ANLP2 0x042B4 |
891 | #define IXGBE_MACC 0x04330 | ||
874 | #define IXGBE_ATLASCTL 0x04800 | 892 | #define IXGBE_ATLASCTL 0x04800 |
875 | #define IXGBE_MMNGC 0x042D0 | 893 | #define IXGBE_MMNGC 0x042D0 |
876 | #define IXGBE_ANLPNP1 0x042D4 | 894 | #define IXGBE_ANLPNP1 0x042D4 |
@@ -883,14 +901,49 @@ | |||
883 | #define IXGBE_MPVC 0x04318 | 901 | #define IXGBE_MPVC 0x04318 |
884 | #define IXGBE_SGMIIC 0x04314 | 902 | #define IXGBE_SGMIIC 0x04314 |
885 | 903 | ||
904 | /* Statistics Registers */ | ||
905 | #define IXGBE_RXNFGPC 0x041B0 | ||
906 | #define IXGBE_RXNFGBCL 0x041B4 | ||
907 | #define IXGBE_RXNFGBCH 0x041B8 | ||
908 | #define IXGBE_RXDGPC 0x02F50 | ||
909 | #define IXGBE_RXDGBCL 0x02F54 | ||
910 | #define IXGBE_RXDGBCH 0x02F58 | ||
911 | #define IXGBE_RXDDGPC 0x02F5C | ||
912 | #define IXGBE_RXDDGBCL 0x02F60 | ||
913 | #define IXGBE_RXDDGBCH 0x02F64 | ||
914 | #define IXGBE_RXLPBKGPC 0x02F68 | ||
915 | #define IXGBE_RXLPBKGBCL 0x02F6C | ||
916 | #define IXGBE_RXLPBKGBCH 0x02F70 | ||
917 | #define IXGBE_RXDLPBKGPC 0x02F74 | ||
918 | #define IXGBE_RXDLPBKGBCL 0x02F78 | ||
919 | #define IXGBE_RXDLPBKGBCH 0x02F7C | ||
920 | #define IXGBE_TXDGPC 0x087A0 | ||
921 | #define IXGBE_TXDGBCL 0x087A4 | ||
922 | #define IXGBE_TXDGBCH 0x087A8 | ||
923 | |||
924 | #define IXGBE_RXDSTATCTRL 0x02F40 | ||
925 | |||
926 | /* Copper Pond 2 link timeout */ | ||
886 | #define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 | 927 | #define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 |
887 | 928 | ||
888 | /* Omer CORECTL */ | 929 | /* Omer CORECTL */ |
889 | #define IXGBE_CORECTL 0x014F00 | 930 | #define IXGBE_CORECTL 0x014F00 |
890 | /* BARCTRL */ | 931 | /* BARCTRL */ |
891 | #define IXGBE_BARCTRL 0x110F4 | 932 | #define IXGBE_BARCTRL 0x110F4 |
892 | #define IXGBE_BARCTRL_FLSIZE 0x0700 | 933 | #define IXGBE_BARCTRL_FLSIZE 0x0700 |
893 | #define IXGBE_BARCTRL_CSRSIZE 0x2000 | 934 | #define IXGBE_BARCTRL_FLSIZE_SHIFT 8 |
935 | #define IXGBE_BARCTRL_CSRSIZE 0x2000 | ||
936 | |||
937 | /* RSCCTL Bit Masks */ | ||
938 | #define IXGBE_RSCCTL_RSCEN 0x01 | ||
939 | #define IXGBE_RSCCTL_MAXDESC_1 0x00 | ||
940 | #define IXGBE_RSCCTL_MAXDESC_4 0x04 | ||
941 | #define IXGBE_RSCCTL_MAXDESC_8 0x08 | ||
942 | #define IXGBE_RSCCTL_MAXDESC_16 0x0C | ||
943 | |||
944 | /* RSCDBU Bit Masks */ | ||
945 | #define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F | ||
946 | #define IXGBE_RSCDBU_RSCACKDIS 0x00000080 | ||
894 | 947 | ||
895 | /* RDRXCTL Bit Masks */ | 948 | /* RDRXCTL Bit Masks */ |
896 | #define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ | 949 | #define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ |
@@ -898,6 +951,8 @@ | |||
898 | #define IXGBE_RDRXCTL_MVMEN 0x00000020 | 951 | #define IXGBE_RDRXCTL_MVMEN 0x00000020 |
899 | #define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ | 952 | #define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ |
900 | #define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ | 953 | #define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ |
954 | #define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ | ||
955 | #define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */ | ||
901 | #define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ | 956 | #define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ |
902 | #define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ | 957 | #define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ |
903 | 958 | ||
@@ -969,8 +1024,8 @@ | |||
969 | #define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ | 1024 | #define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ |
970 | #define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ | 1025 | #define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ |
971 | #define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */ | 1026 | #define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */ |
972 | #define IXGBE_MSCA_READ 0x08000000 /* OP CODE 10 (read) */ | 1027 | #define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */ |
973 | #define IXGBE_MSCA_READ_AUTOINC 0x0C000000 /* OP CODE 11 (read, auto inc)*/ | 1028 | #define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/ |
974 | #define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ | 1029 | #define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ |
975 | #define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ | 1030 | #define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ |
976 | #define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */ | 1031 | #define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */ |
@@ -1057,6 +1112,7 @@ | |||
1057 | #define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ | 1112 | #define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ |
1058 | #define IXGBE_GPIE_EIAME 0x40000000 | 1113 | #define IXGBE_GPIE_EIAME 0x40000000 |
1059 | #define IXGBE_GPIE_PBA_SUPPORT 0x80000000 | 1114 | #define IXGBE_GPIE_PBA_SUPPORT 0x80000000 |
1115 | #define IXGBE_GPIE_RSC_DELAY_SHIFT 11 | ||
1060 | #define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ | 1116 | #define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ |
1061 | #define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ | 1117 | #define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ |
1062 | #define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ | 1118 | #define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ |
@@ -1291,6 +1347,11 @@ | |||
1291 | #define IXGBE_FTQF_POOL_SHIFT 8 | 1347 | #define IXGBE_FTQF_POOL_SHIFT 8 |
1292 | #define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F | 1348 | #define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F |
1293 | #define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 | 1349 | #define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 |
1350 | #define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E | ||
1351 | #define IXGBE_FTQF_DEST_ADDR_MASK 0x1D | ||
1352 | #define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B | ||
1353 | #define IXGBE_FTQF_DEST_PORT_MASK 0x17 | ||
1354 | #define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F | ||
1294 | #define IXGBE_FTQF_POOL_MASK_EN 0x40000000 | 1355 | #define IXGBE_FTQF_POOL_MASK_EN 0x40000000 |
1295 | #define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 | 1356 | #define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 |
1296 | 1357 | ||
@@ -1333,11 +1394,11 @@ | |||
1333 | * | 1394 | * |
1334 | * Current filters: | 1395 | * Current filters: |
1335 | * EAPOL 802.1x (0x888e): Filter 0 | 1396 | * EAPOL 802.1x (0x888e): Filter 0 |
1336 | * BCN (0x8904): Filter 1 | 1397 | * FCoE (0x8906): Filter 2 |
1337 | * 1588 (0x88f7): Filter 3 | 1398 | * 1588 (0x88f7): Filter 3 |
1399 | * FIP (0x8914): Filter 4 | ||
1338 | */ | 1400 | */ |
1339 | #define IXGBE_ETQF_FILTER_EAPOL 0 | 1401 | #define IXGBE_ETQF_FILTER_EAPOL 0 |
1340 | #define IXGBE_ETQF_FILTER_BCN 1 | ||
1341 | #define IXGBE_ETQF_FILTER_FCOE 2 | 1402 | #define IXGBE_ETQF_FILTER_FCOE 2 |
1342 | #define IXGBE_ETQF_FILTER_1588 3 | 1403 | #define IXGBE_ETQF_FILTER_1588 3 |
1343 | #define IXGBE_ETQF_FILTER_FIP 4 | 1404 | #define IXGBE_ETQF_FILTER_FIP 4 |
@@ -1448,6 +1509,11 @@ | |||
1448 | #define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) | 1509 | #define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) |
1449 | #define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) | 1510 | #define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) |
1450 | 1511 | ||
1512 | #define IXGBE_MACC_FLU 0x00000001 | ||
1513 | #define IXGBE_MACC_FSV_10G 0x00030000 | ||
1514 | #define IXGBE_MACC_FS 0x00040000 | ||
1515 | #define IXGBE_MAC_RX2TX_LPBK 0x00000002 | ||
1516 | |||
1451 | /* LINKS Bit Masks */ | 1517 | /* LINKS Bit Masks */ |
1452 | #define IXGBE_LINKS_KX_AN_COMP 0x80000000 | 1518 | #define IXGBE_LINKS_KX_AN_COMP 0x80000000 |
1453 | #define IXGBE_LINKS_UP 0x40000000 | 1519 | #define IXGBE_LINKS_UP 0x40000000 |
@@ -1501,7 +1567,6 @@ | |||
1501 | #define IXGBE_ANLP1_ASM_PAUSE 0x0800 | 1567 | #define IXGBE_ANLP1_ASM_PAUSE 0x0800 |
1502 | #define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 | 1568 | #define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 |
1503 | 1569 | ||
1504 | |||
1505 | /* SW Semaphore Register bitmasks */ | 1570 | /* SW Semaphore Register bitmasks */ |
1506 | #define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ | 1571 | #define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ |
1507 | #define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ | 1572 | #define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ |
@@ -1514,6 +1579,10 @@ | |||
1514 | #define IXGBE_GSSR_PHY1_SM 0x0004 | 1579 | #define IXGBE_GSSR_PHY1_SM 0x0004 |
1515 | #define IXGBE_GSSR_MAC_CSR_SM 0x0008 | 1580 | #define IXGBE_GSSR_MAC_CSR_SM 0x0008 |
1516 | #define IXGBE_GSSR_FLASH_SM 0x0010 | 1581 | #define IXGBE_GSSR_FLASH_SM 0x0010 |
1582 | #define IXGBE_GSSR_SW_MNG_SM 0x0400 | ||
1583 | |||
1584 | /* FW Status register bitmask */ | ||
1585 | #define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ | ||
1517 | 1586 | ||
1518 | /* EEC Register */ | 1587 | /* EEC Register */ |
1519 | #define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ | 1588 | #define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ |
@@ -1534,6 +1603,7 @@ | |||
1534 | /* EEPROM Addressing bits based on type (0-small, 1-large) */ | 1603 | /* EEPROM Addressing bits based on type (0-small, 1-large) */ |
1535 | #define IXGBE_EEC_ADDR_SIZE 0x00000400 | 1604 | #define IXGBE_EEC_ADDR_SIZE 0x00000400 |
1536 | #define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ | 1605 | #define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ |
1606 | #define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */ ||
1537 | 1607 | ||
1538 | #define IXGBE_EEC_SIZE_SHIFT 11 | 1608 | #define IXGBE_EEC_SIZE_SHIFT 11 |
1539 | #define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 | 1609 | #define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 |
@@ -1563,8 +1633,10 @@ | |||
1563 | #define IXGBE_FW_PTR 0x0F | 1633 | #define IXGBE_FW_PTR 0x0F |
1564 | #define IXGBE_PBANUM0_PTR 0x15 | 1634 | #define IXGBE_PBANUM0_PTR 0x15 |
1565 | #define IXGBE_PBANUM1_PTR 0x16 | 1635 | #define IXGBE_PBANUM1_PTR 0x16 |
1566 | #define IXGBE_DEVICE_CAPS 0x2C | 1636 | #define IXGBE_FREE_SPACE_PTR 0x3E
1567 | #define IXGBE_SAN_MAC_ADDR_PTR 0x28 | 1637 | #define IXGBE_SAN_MAC_ADDR_PTR 0x28 |
1638 | #define IXGBE_DEVICE_CAPS 0x2C | ||
1639 | #define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 | ||
1568 | #define IXGBE_PCIE_MSIX_82599_CAPS 0x72 | 1640 | #define IXGBE_PCIE_MSIX_82599_CAPS 0x72 |
1569 | #define IXGBE_PCIE_MSIX_82598_CAPS 0x62 | 1641 | #define IXGBE_PCIE_MSIX_82598_CAPS 0x62 |
1570 | 1642 | ||
@@ -1601,6 +1673,10 @@ | |||
1601 | 1673 | ||
1602 | #define IXGBE_ETH_LENGTH_OF_ADDRESS 6 | 1674 | #define IXGBE_ETH_LENGTH_OF_ADDRESS 6 |
1603 | 1675 | ||
1676 | #define IXGBE_EEPROM_PAGE_SIZE_MAX 128 | ||
1677 | #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ | ||
1678 | #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ | ||
1679 | |||
1604 | #ifndef IXGBE_EEPROM_GRANT_ATTEMPTS | 1680 | #ifndef IXGBE_EEPROM_GRANT_ATTEMPTS |
1605 | #define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ | 1681 | #define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ |
1606 | #endif | 1682 | #endif |
@@ -1616,14 +1692,25 @@ | |||
1616 | #define IXGBE_FLUDONE_ATTEMPTS 20000 | 1692 | #define IXGBE_FLUDONE_ATTEMPTS 20000 |
1617 | #endif | 1693 | #endif |
1618 | 1694 | ||
1695 | #define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ | ||
1696 | #define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ | ||
1697 | #define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ | ||
1698 | #define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ | ||
1699 | |||
1619 | #define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 | 1700 | #define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 |
1620 | #define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 | 1701 | #define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 |
1621 | #define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 | 1702 | #define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 |
1622 | #define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 | 1703 | #define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 |
1704 | #define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 | ||
1705 | #define IXGBE_FW_LESM_STATE_1 0x1 | ||
1706 | #define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ | ||
1623 | #define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 | 1707 | #define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 |
1624 | #define IXGBE_FW_PATCH_VERSION_4 0x7 | 1708 | #define IXGBE_FW_PATCH_VERSION_4 0x7 |
1625 | 1709 | #define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ | |
1626 | /* Alternative SAN MAC Address Block */ | 1710 | #define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ |
1711 | #define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ | ||
1712 | #define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ | ||
1713 | #define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ | ||
1627 | #define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ | 1714 | #define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ |
1628 | #define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */ | 1715 | #define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */ |
1629 | #define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ | 1716 | #define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ |
@@ -1688,6 +1775,7 @@ | |||
1688 | /* Transmit Config masks */ | 1775 | /* Transmit Config masks */ |
1689 | #define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ | 1776 | #define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ |
1690 | #define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */ | 1777 | #define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */ |
1778 | #define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ | ||
1691 | /* Enable short packet padding to 64 bytes */ | 1779 | /* Enable short packet padding to 64 bytes */ |
1692 | #define IXGBE_TX_PAD_ENABLE 0x00000400 | 1780 | #define IXGBE_TX_PAD_ENABLE 0x00000400 |
1693 | #define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ | 1781 | #define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ |
@@ -1701,9 +1789,9 @@ | |||
1701 | #define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ | 1789 | #define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ |
1702 | #define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ | 1790 | #define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ |
1703 | #define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ | 1791 | #define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ |
1704 | #define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ | ||
1705 | #define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ | 1792 | #define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ |
1706 | #define IXGBE_RXDCTL_RLPML_EN 0x00008000 | 1793 | #define IXGBE_RXDCTL_RLPML_EN 0x00008000 |
1794 | #define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ | ||
1707 | 1795 | ||
1708 | #define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ | 1796 | #define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ |
1709 | #define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ | 1797 | #define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ |
@@ -1719,6 +1807,8 @@ | |||
1719 | #define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ | 1807 | #define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ |
1720 | #define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ | 1808 | #define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ |
1721 | 1809 | ||
1810 | #define IXGBE_MFLCN_RPFCE_SHIFT 4 | ||
1811 | |||
1722 | /* Multiple Receive Queue Control */ | 1812 | /* Multiple Receive Queue Control */ |
1723 | #define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ | 1813 | #define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ |
1724 | #define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ | 1814 | #define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ |
@@ -1859,6 +1949,8 @@ | |||
1859 | #define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 | 1949 | #define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 |
1860 | #define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 | 1950 | #define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 |
1861 | #define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 | 1951 | #define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 |
1952 | #define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 | ||
1953 | #define IXGBE_RXDADV_RSCCNT_SHIFT 17 | ||
1862 | #define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 | 1954 | #define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 |
1863 | #define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 | 1955 | #define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 |
1864 | #define IXGBE_RXDADV_SPH 0x8000 | 1956 | #define IXGBE_RXDADV_SPH 0x8000 |
@@ -1934,15 +2026,6 @@ | |||
1934 | #define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) | 2026 | #define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) |
1935 | #define IXGBE_VFLREC(_i) (0x00700 + (_i * 4)) | 2027 | #define IXGBE_VFLREC(_i) (0x00700 + (_i * 4)) |
1936 | 2028 | ||
1937 | /* Little Endian defines */ | ||
1938 | #ifndef __le32 | ||
1939 | #define __le32 u32 | ||
1940 | #endif | ||
1941 | #ifndef __le64 | ||
1942 | #define __le64 u64 | ||
1943 | |||
1944 | #endif | ||
1945 | |||
1946 | enum ixgbe_fdir_pballoc_type { | 2029 | enum ixgbe_fdir_pballoc_type { |
1947 | IXGBE_FDIR_PBALLOC_64K = 0, | 2030 | IXGBE_FDIR_PBALLOC_64K = 0, |
1948 | IXGBE_FDIR_PBALLOC_128K, | 2031 | IXGBE_FDIR_PBALLOC_128K, |
@@ -2141,8 +2224,6 @@ typedef u32 ixgbe_link_speed; | |||
2141 | IXGBE_LINK_SPEED_1GB_FULL | \ | 2224 | IXGBE_LINK_SPEED_1GB_FULL | \ |
2142 | IXGBE_LINK_SPEED_10GB_FULL) | 2225 | IXGBE_LINK_SPEED_10GB_FULL) |
2143 | 2226 | ||
2144 | #define IXGBE_PCIE_DEV_CTRL_2 0xC8 | ||
2145 | #define PCIE_COMPL_TO_VALUE 0x05 | ||
2146 | 2227 | ||
2147 | /* Physical layer type */ | 2228 | /* Physical layer type */ |
2148 | typedef u32 ixgbe_physical_layer; | 2229 | typedef u32 ixgbe_physical_layer; |
@@ -2315,6 +2396,7 @@ enum ixgbe_sfp_type { | |||
2315 | enum ixgbe_media_type { | 2396 | enum ixgbe_media_type { |
2316 | ixgbe_media_type_unknown = 0, | 2397 | ixgbe_media_type_unknown = 0, |
2317 | ixgbe_media_type_fiber, | 2398 | ixgbe_media_type_fiber, |
2399 | ixgbe_media_type_fiber_lco, | ||
2318 | ixgbe_media_type_copper, | 2400 | ixgbe_media_type_copper, |
2319 | ixgbe_media_type_backplane, | 2401 | ixgbe_media_type_backplane, |
2320 | ixgbe_media_type_cx4, | 2402 | ixgbe_media_type_cx4, |
@@ -2478,6 +2560,10 @@ struct ixgbe_hw_stats { | |||
2478 | u64 fcoeptc; | 2560 | u64 fcoeptc; |
2479 | u64 fcoedwrc; | 2561 | u64 fcoedwrc; |
2480 | u64 fcoedwtc; | 2562 | u64 fcoedwtc; |
2563 | u64 b2ospc; | ||
2564 | u64 b2ogprc; | ||
2565 | u64 o2bgptc; | ||
2566 | u64 o2bspc; | ||
2481 | }; | 2567 | }; |
2482 | 2568 | ||
2483 | /* forward declaration */ | 2569 | /* forward declaration */ |
@@ -2491,7 +2577,9 @@ typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, | |||
2491 | struct ixgbe_eeprom_operations { | 2577 | struct ixgbe_eeprom_operations { |
2492 | s32 (*init_params)(struct ixgbe_hw *); | 2578 | s32 (*init_params)(struct ixgbe_hw *); |
2493 | s32 (*read)(struct ixgbe_hw *, u16, u16 *); | 2579 | s32 (*read)(struct ixgbe_hw *, u16, u16 *); |
2580 | s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *); | ||
2494 | s32 (*write)(struct ixgbe_hw *, u16, u16); | 2581 | s32 (*write)(struct ixgbe_hw *, u16, u16); |
2582 | s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); | ||
2495 | s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); | 2583 | s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); |
2496 | s32 (*update_checksum)(struct ixgbe_hw *); | 2584 | s32 (*update_checksum)(struct ixgbe_hw *); |
2497 | u16 (*calc_checksum)(struct ixgbe_hw *); | 2585 | u16 (*calc_checksum)(struct ixgbe_hw *); |
@@ -2577,6 +2665,7 @@ struct ixgbe_eeprom_info { | |||
2577 | u32 semaphore_delay; | 2665 | u32 semaphore_delay; |
2578 | u16 word_size; | 2666 | u16 word_size; |
2579 | u16 address_bits; | 2667 | u16 address_bits; |
2668 | u16 word_page_size; | ||
2580 | }; | 2669 | }; |
2581 | 2670 | ||
2582 | #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 | 2671 | #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 |
@@ -2597,6 +2686,7 @@ struct ixgbe_mac_info { | |||
2597 | u32 vft_size; | 2686 | u32 vft_size; |
2598 | u32 num_rar_entries; | 2687 | u32 num_rar_entries; |
2599 | u32 rar_highwater; | 2688 | u32 rar_highwater; |
2689 | u32 rx_pb_size; | ||
2600 | u32 max_tx_queues; | 2690 | u32 max_tx_queues; |
2601 | u32 max_rx_queues; | 2691 | u32 max_rx_queues; |
2602 | u32 max_msix_vectors; | 2692 | u32 max_msix_vectors; |
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c index d9323c08f5c7..4ed687be2fe3 100644 --- a/drivers/net/ixgbe/ixgbe_x540.c +++ b/drivers/net/ixgbe/ixgbe_x540.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #define IXGBE_X540_RAR_ENTRIES 128 | 37 | #define IXGBE_X540_RAR_ENTRIES 128 |
38 | #define IXGBE_X540_MC_TBL_SIZE 128 | 38 | #define IXGBE_X540_MC_TBL_SIZE 128 |
39 | #define IXGBE_X540_VFT_TBL_SIZE 128 | 39 | #define IXGBE_X540_VFT_TBL_SIZE 128 |
40 | #define IXGBE_X540_RX_PB_SIZE 384 | ||
40 | 41 | ||
41 | static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); | 42 | static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); |
42 | static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); | 43 | static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); |
@@ -226,6 +227,28 @@ mac_reset_top: | |||
226 | } | 227 | } |
227 | 228 | ||
228 | /** | 229 | /** |
230 | * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx | ||
231 | * @hw: pointer to hardware structure | ||
232 | * | ||
233 | * Starts the hardware using the generic start_hw function | ||
234 | * and the generation start_hw function. | ||
235 | * Then performs revision-specific operations, if any. | ||
236 | **/ | ||
237 | static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) | ||
238 | { | ||
239 | s32 ret_val = 0; | ||
240 | |||
241 | ret_val = ixgbe_start_hw_generic(hw); | ||
242 | if (ret_val != 0) | ||
243 | goto out; | ||
244 | |||
245 | ret_val = ixgbe_start_hw_gen2(hw); | ||
246 | hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE; | ||
247 | out: | ||
248 | return ret_val; | ||
249 | } | ||
250 | |||
251 | /** | ||
229 | * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type | 252 | * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type |
230 | * @hw: pointer to hardware structure | 253 | * @hw: pointer to hardware structure |
231 | * | 254 | * |
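Editor's note: the new ixgbe_start_hw_X540 above chains the generic and gen2 start routines and records the X540 Rx packet-buffer size (384 KB) in mac.rx_pb_size. A hedged sketch of why caching that value is useful: later buffer-partitioning code (e.g. for DCB) can divide it without hard-coding a per-device constant. The helper and parameter below are assumptions for the sketch only.

/*
 * Hypothetical illustration: split the cached Rx packet-buffer size
 * evenly across traffic classes. "num_tcs" and this helper are not
 * part of the commit above.
 */
static u32 x540_rx_pb_share(struct ixgbe_hw *hw, u8 num_tcs)
{
	if (num_tcs == 0)
		num_tcs = 1;
	return hw->mac.rx_pb_size / num_tcs;	/* per-TC share, in KB */
}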
@@ -281,74 +304,105 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) | |||
281 | } | 304 | } |
282 | 305 | ||
283 | /** | 306 | /** |
284 | * ixgbe_read_eerd_X540 - Read EEPROM word using EERD | 307 | * ixgbe_read_eerd_X540- Read EEPROM word using EERD |
285 | * @hw: pointer to hardware structure | 308 | * @hw: pointer to hardware structure |
286 | * @offset: offset of word in the EEPROM to read | 309 | * @offset: offset of word in the EEPROM to read |
287 | * @data: word read from the EERPOM | 310 | * @data: word read from the EEPROM |
311 | * | ||
312 | * Reads a 16 bit word from the EEPROM using the EERD register. | ||
288 | **/ | 313 | **/ |
289 | static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) | 314 | static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) |
290 | { | 315 | { |
291 | s32 status; | 316 | s32 status = 0; |
292 | 317 | ||
293 | if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) | 318 | if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == |
319 | 0) | ||
294 | status = ixgbe_read_eerd_generic(hw, offset, data); | 320 | status = ixgbe_read_eerd_generic(hw, offset, data); |
295 | else | 321 | else |
296 | status = IXGBE_ERR_SWFW_SYNC; | 322 | status = IXGBE_ERR_SWFW_SYNC; |
297 | 323 | ||
298 | ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM); | 324 | hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); |
299 | return status; | 325 | return status; |
300 | } | 326 | } |
301 | 327 | ||
302 | /** | 328 | /** |
303 | * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR | 329 | * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD |
304 | * @hw: pointer to hardware structure | 330 | * @hw: pointer to hardware structure |
305 | * @offset: offset of word in the EEPROM to write | 331 | * @offset: offset of word in the EEPROM to read |
306 | * @data: word write to the EEPROM | 332 | * @words: number of words |
333 | * @data: word(s) read from the EEPROM | ||
307 | * | 334 | * |
308 | * Write a 16 bit word to the EEPROM using the EEWR register. | 335 | * Reads a 16 bit word(s) from the EEPROM using the EERD register. |
309 | **/ | 336 | **/ |
310 | static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) | 337 | static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, |
338 | u16 offset, u16 words, u16 *data) | ||
311 | { | 339 | { |
312 | u32 eewr; | 340 | s32 status = 0; |
313 | s32 status; | ||
314 | 341 | ||
315 | hw->eeprom.ops.init_params(hw); | 342 | if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == |
343 | 0) | ||
344 | status = ixgbe_read_eerd_buffer_generic(hw, offset, | ||
345 | words, data); | ||
346 | else | ||
347 | status = IXGBE_ERR_SWFW_SYNC; | ||
316 | 348 | ||
317 | if (offset >= hw->eeprom.word_size) { | 349 | hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); |
318 | status = IXGBE_ERR_EEPROM; | 350 | return status; |
319 | goto out; | 351 | } |
320 | } | ||
321 | 352 | ||
322 | eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) | | 353 | /** |
323 | (data << IXGBE_EEPROM_RW_REG_DATA) | | 354 | * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR |
324 | IXGBE_EEPROM_RW_REG_START; | 355 | * @hw: pointer to hardware structure |
356 | * @offset: offset of word in the EEPROM to write | ||
357 | * @data: word write to the EEPROM | ||
358 | * | ||
359 | * Write a 16 bit word to the EEPROM using the EEWR register. | ||
360 | **/ | ||
361 | static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) | ||
362 | { | ||
363 | s32 status = 0; | ||
325 | 364 | ||
326 | if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { | 365 | if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) |
327 | status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); | 366 | status = ixgbe_write_eewr_generic(hw, offset, data); |
328 | if (status != 0) { | 367 | else |
329 | hw_dbg(hw, "Eeprom write EEWR timed out\n"); | 368 | status = IXGBE_ERR_SWFW_SYNC; |
330 | goto out; | ||
331 | } | ||
332 | 369 | ||
333 | IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); | 370 | hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); |
371 | return status; | ||
372 | } | ||
334 | 373 | ||
335 | status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); | 374 | /** |
336 | if (status != 0) { | 375 | * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR |
337 | hw_dbg(hw, "Eeprom write EEWR timed out\n"); | 376 | * @hw: pointer to hardware structure |
338 | goto out; | 377 | * @offset: offset of word in the EEPROM to write |
339 | } | 378 | * @words: number of words |
340 | } else { | 379 | * @data: word(s) write to the EEPROM |
380 | * | ||
381 | * Write a 16 bit word(s) to the EEPROM using the EEWR register. | ||
382 | **/ | ||
383 | static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, | ||
384 | u16 offset, u16 words, u16 *data) | ||
385 | { | ||
386 | s32 status = 0; | ||
387 | |||
388 | if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == | ||
389 | 0) | ||
390 | status = ixgbe_write_eewr_buffer_generic(hw, offset, | ||
391 | words, data); | ||
392 | else | ||
341 | status = IXGBE_ERR_SWFW_SYNC; | 393 | status = IXGBE_ERR_SWFW_SYNC; |
342 | } | ||
343 | 394 | ||
344 | out: | 395 | hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); |
345 | ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM); | ||
346 | return status; | 396 | return status; |
347 | } | 397 | } |
348 | 398 | ||
349 | /** | 399 | /** |
350 | * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum | 400 | * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum |
351 | * @hw: pointer to hardware structure | 401 | * |
402 | * This function does not use synchronization for EERD and EEWR. It can | ||
403 | * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540. | ||
404 | * | ||
405 | * @hw: pointer to hardware structure | ||
352 | **/ | 406 | **/ |
353 | static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) | 407 | static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) |
354 | { | 408 | { |
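Editor's note: the four new X540 EEPROM entry points above all share one shape: take the EEP_SM software/firmware semaphore via mac.ops.acquire_swfw_sync, run the unsynchronized generic accessor, then release the semaphore (the in-tree versions call release unconditionally, as shown). Condensed into a single read-side sketch for reference; this restates the hunk above rather than adding new behaviour.

/*
 * Condensed restatement of the wrapper pattern used by
 * ixgbe_read_eerd_X540 and friends (sketch only).
 */
static s32 x540_eeprom_read_pattern(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	s32 status;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
		status = ixgbe_read_eerd_generic(hw, offset, data);
	else
		status = IXGBE_ERR_SWFW_SYNC;

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	return status;
}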
@@ -359,9 +413,15 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) | |||
359 | u16 pointer = 0; | 413 | u16 pointer = 0; |
360 | u16 word = 0; | 414 | u16 word = 0; |
361 | 415 | ||
416 | /* | ||
417 | * Do not use hw->eeprom.ops.read because we do not want to take | ||
418 | * the synchronization semaphores here. Instead use | ||
419 | * ixgbe_read_eerd_generic | ||
420 | */ | ||
421 | |||
362 | /* Include 0x0-0x3F in the checksum */ | 422 | /* Include 0x0-0x3F in the checksum */ |
363 | for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { | 423 | for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { |
364 | if (hw->eeprom.ops.read(hw, i, &word) != 0) { | 424 | if (ixgbe_read_eerd_generic(hw, i, &word) != 0) { |
365 | hw_dbg(hw, "EEPROM read failed\n"); | 425 | hw_dbg(hw, "EEPROM read failed\n"); |
366 | break; | 426 | break; |
367 | } | 427 | } |
@@ -376,7 +436,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) | |||
376 | if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) | 436 | if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) |
377 | continue; | 437 | continue; |
378 | 438 | ||
379 | if (hw->eeprom.ops.read(hw, i, &pointer) != 0) { | 439 | if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) { |
380 | hw_dbg(hw, "EEPROM read failed\n"); | 440 | hw_dbg(hw, "EEPROM read failed\n"); |
381 | break; | 441 | break; |
382 | } | 442 | } |
@@ -386,7 +446,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) | |||
386 | pointer >= hw->eeprom.word_size) | 446 | pointer >= hw->eeprom.word_size) |
387 | continue; | 447 | continue; |
388 | 448 | ||
389 | if (hw->eeprom.ops.read(hw, pointer, &length) != 0) { | 449 | if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) { |
390 | hw_dbg(hw, "EEPROM read failed\n"); | 450 | hw_dbg(hw, "EEPROM read failed\n"); |
391 | break; | 451 | break; |
392 | } | 452 | } |
@@ -397,7 +457,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) | |||
397 | continue; | 457 | continue; |
398 | 458 | ||
399 | for (j = pointer+1; j <= pointer+length; j++) { | 459 | for (j = pointer+1; j <= pointer+length; j++) { |
400 | if (hw->eeprom.ops.read(hw, j, &word) != 0) { | 460 | if (ixgbe_read_eerd_generic(hw, j, &word) != 0) { |
401 | hw_dbg(hw, "EEPROM read failed\n"); | 461 | hw_dbg(hw, "EEPROM read failed\n"); |
402 | break; | 462 | break; |
403 | } | 463 | } |
@@ -411,6 +471,62 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) | |||
411 | } | 471 | } |
412 | 472 | ||
413 | /** | 473 | /** |
474 | * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum | ||
475 | * @hw: pointer to hardware structure | ||
476 | * @checksum_val: calculated checksum | ||
477 | * | ||
478 | * Performs checksum calculation and validates the EEPROM checksum. If the | ||
479 | * caller does not need checksum_val, the value can be NULL. | ||
480 | **/ | ||
481 | static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, | ||
482 | u16 *checksum_val) | ||
483 | { | ||
484 | s32 status; | ||
485 | u16 checksum; | ||
486 | u16 read_checksum = 0; | ||
487 | |||
488 | /* | ||
489 | * Read the first word from the EEPROM. If this times out or fails, do | ||
490 | * not continue or we could be in for a very long wait while every | ||
491 | * EEPROM read fails | ||
492 | */ | ||
493 | status = hw->eeprom.ops.read(hw, 0, &checksum); | ||
494 | |||
495 | if (status != 0) { | ||
496 | hw_dbg(hw, "EEPROM read failed\n"); | ||
497 | goto out; | ||
498 | } | ||
499 | |||
500 | if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { | ||
501 | checksum = hw->eeprom.ops.calc_checksum(hw); | ||
502 | |||
503 | /* | ||
504 | * Do not use hw->eeprom.ops.read because we do not want to take | ||
505 | * the synchronization semaphores twice here. | ||
506 | */ | ||
507 | ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, | ||
508 | &read_checksum); | ||
509 | |||
510 | /* | ||
511 | * Verify read checksum from EEPROM is the same as | ||
512 | * calculated checksum | ||
513 | */ | ||
514 | if (read_checksum != checksum) | ||
515 | status = IXGBE_ERR_EEPROM_CHECKSUM; | ||
516 | |||
517 | /* If the user cares, return the calculated checksum */ | ||
518 | if (checksum_val) | ||
519 | *checksum_val = checksum; | ||
520 | } else { | ||
521 | status = IXGBE_ERR_SWFW_SYNC; | ||
522 | } | ||
523 | |||
524 | hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); | ||
525 | out: | ||
526 | return status; | ||
527 | } | ||
528 | |||
529 | /** | ||
414 | * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash | 530 | * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash |
415 | * @hw: pointer to hardware structure | 531 | * @hw: pointer to hardware structure |
416 | * | 532 | * |
@@ -421,11 +537,35 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) | |||
421 | static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) | 537 | static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) |
422 | { | 538 | { |
423 | s32 status; | 539 | s32 status; |
540 | u16 checksum; | ||
541 | |||
542 | /* | ||
543 | * Read the first word from the EEPROM. If this times out or fails, do | ||
544 | * not continue or we could be in for a very long wait while every | ||
545 | * EEPROM read fails | ||
546 | */ | ||
547 | status = hw->eeprom.ops.read(hw, 0, &checksum); | ||
548 | |||
549 | if (status != 0) | ||
550 | hw_dbg(hw, "EEPROM read failed\n"); | ||
424 | 551 | ||
425 | status = ixgbe_update_eeprom_checksum_generic(hw); | 552 | if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { |
553 | checksum = hw->eeprom.ops.calc_checksum(hw); | ||
554 | |||
555 | /* | ||
556 | * Do not use hw->eeprom.ops.write because we do not want to | ||
557 | * take the synchronization semaphores twice here. | ||
558 | */ | ||
559 | status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, | ||
560 | checksum); | ||
426 | 561 | ||
427 | if (status) | 562 | if (status == 0) |
428 | status = ixgbe_update_flash_X540(hw); | 563 | status = ixgbe_update_flash_X540(hw); |
564 | else | ||
565 | status = IXGBE_ERR_SWFW_SYNC; | ||
566 | } | ||
567 | |||
568 | hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); | ||
429 | 569 | ||
430 | return status; | 570 | return status; |
431 | } | 571 | } |
@@ -452,7 +592,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) | |||
452 | IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); | 592 | IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); |
453 | 593 | ||
454 | status = ixgbe_poll_flash_update_done_X540(hw); | 594 | status = ixgbe_poll_flash_update_done_X540(hw); |
455 | if (status) | 595 | if (status == 0) |
456 | hw_dbg(hw, "Flash update complete\n"); | 596 | hw_dbg(hw, "Flash update complete\n"); |
457 | else | 597 | else |
458 | hw_dbg(hw, "Flash update time out\n"); | 598 | hw_dbg(hw, "Flash update time out\n"); |
@@ -466,11 +606,10 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) | |||
466 | } | 606 | } |
467 | 607 | ||
468 | status = ixgbe_poll_flash_update_done_X540(hw); | 608 | status = ixgbe_poll_flash_update_done_X540(hw); |
469 | if (status) | 609 | if (status == 0) |
470 | hw_dbg(hw, "Flash update complete\n"); | 610 | hw_dbg(hw, "Flash update complete\n"); |
471 | else | 611 | else |
472 | hw_dbg(hw, "Flash update time out\n"); | 612 | hw_dbg(hw, "Flash update time out\n"); |
473 | |||
474 | } | 613 | } |
475 | out: | 614 | out: |
476 | return status; | 615 | return status; |
@@ -542,7 +681,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) | |||
542 | * resource (swmask) | 681 | * resource (swmask) |
543 | */ | 682 | */ |
544 | ixgbe_release_swfw_sync_semaphore(hw); | 683 | ixgbe_release_swfw_sync_semaphore(hw); |
545 | msleep(5); | 684 | usleep_range(5000, 10000); |
546 | } | 685 | } |
547 | } | 686 | } |
548 | 687 | ||
@@ -564,7 +703,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) | |||
564 | } | 703 | } |
565 | } | 704 | } |
566 | 705 | ||
567 | msleep(5); | 706 | usleep_range(5000, 10000); |
568 | return 0; | 707 | return 0; |
569 | } | 708 | } |
570 | 709 | ||
@@ -588,7 +727,7 @@ static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) | |||
588 | IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); | 727 | IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); |
589 | 728 | ||
590 | ixgbe_release_swfw_sync_semaphore(hw); | 729 | ixgbe_release_swfw_sync_semaphore(hw); |
591 | msleep(5); | 730 | usleep_range(5000, 10000); |
592 | } | 731 | } |
593 | 732 | ||
594 | /** | 733 | /** |
@@ -658,10 +797,70 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) | |||
658 | IXGBE_WRITE_FLUSH(hw); | 797 | IXGBE_WRITE_FLUSH(hw); |
659 | } | 798 | } |
660 | 799 | ||
800 | /** | ||
801 | * ixgbe_blink_led_start_X540 - Blink LED based on index. | ||
802 | * @hw: pointer to hardware structure | ||
803 | * @index: led number to blink | ||
804 | * | ||
805 | * Devices that implement the version 2 interface: | ||
806 | * X540 | ||
807 | **/ | ||
808 | static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) | ||
809 | { | ||
810 | u32 macc_reg; | ||
811 | u32 ledctl_reg; | ||
812 | |||
813 | /* | ||
814 | * In order for the blink bit in the LED control register | ||
815 | * to work, link and speed must be forced in the MAC. We | ||
816 | * will reverse this when we stop the blinking. | ||
817 | */ | ||
818 | macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); | ||
819 | macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; | ||
820 | IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); | ||
821 | |||
822 | /* Set the LED to LINK_UP + BLINK. */ | ||
823 | ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | ||
824 | ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); | ||
825 | ledctl_reg |= IXGBE_LED_BLINK(index); | ||
826 | IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); | ||
827 | IXGBE_WRITE_FLUSH(hw); | ||
828 | |||
829 | return 0; | ||
830 | } | ||
831 | |||
832 | /** | ||
833 | * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index. | ||
834 | * @hw: pointer to hardware structure | ||
835 | * @index: led number to stop blinking | ||
836 | * | ||
837 | * Devices that implement the version 2 interface: | ||
838 | * X540 | ||
839 | **/ | ||
840 | static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) | ||
841 | { | ||
842 | u32 macc_reg; | ||
843 | u32 ledctl_reg; | ||
844 | |||
845 | /* Restore the LED to its default value. */ | ||
846 | ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); | ||
847 | ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); | ||
848 | ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); | ||
849 | ledctl_reg &= ~IXGBE_LED_BLINK(index); | ||
850 | IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); | ||
851 | |||
852 | /* Unforce link and speed in the MAC. */ | ||
853 | macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); | ||
854 | macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS); | ||
855 | IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); | ||
856 | IXGBE_WRITE_FLUSH(hw); | ||
857 | |||
858 | return 0; | ||
859 | } | ||
661 | static struct ixgbe_mac_operations mac_ops_X540 = { | 860 | static struct ixgbe_mac_operations mac_ops_X540 = { |
662 | .init_hw = &ixgbe_init_hw_generic, | 861 | .init_hw = &ixgbe_init_hw_generic, |
663 | .reset_hw = &ixgbe_reset_hw_X540, | 862 | .reset_hw = &ixgbe_reset_hw_X540, |
664 | .start_hw = &ixgbe_start_hw_generic, | 863 | .start_hw = &ixgbe_start_hw_X540, |
665 | .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, | 864 | .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, |
666 | .get_media_type = &ixgbe_get_media_type_X540, | 865 | .get_media_type = &ixgbe_get_media_type_X540, |
667 | .get_supported_physical_layer = | 866 | .get_supported_physical_layer = |
@@ -669,7 +868,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = { | |||
669 | .enable_rx_dma = &ixgbe_enable_rx_dma_generic, | 868 | .enable_rx_dma = &ixgbe_enable_rx_dma_generic, |
670 | .get_mac_addr = &ixgbe_get_mac_addr_generic, | 869 | .get_mac_addr = &ixgbe_get_mac_addr_generic, |
671 | .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, | 870 | .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, |
672 | .get_device_caps = NULL, | 871 | .get_device_caps = &ixgbe_get_device_caps_generic, |
673 | .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, | 872 | .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, |
674 | .stop_adapter = &ixgbe_stop_adapter_generic, | 873 | .stop_adapter = &ixgbe_stop_adapter_generic, |
675 | .get_bus_info = &ixgbe_get_bus_info_generic, | 874 | .get_bus_info = &ixgbe_get_bus_info_generic, |
@@ -681,8 +880,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = { | |||
681 | .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, | 880 | .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, |
682 | .led_on = &ixgbe_led_on_generic, | 881 | .led_on = &ixgbe_led_on_generic, |
683 | .led_off = &ixgbe_led_off_generic, | 882 | .led_off = &ixgbe_led_off_generic, |
684 | .blink_led_start = &ixgbe_blink_led_start_generic, | 883 | .blink_led_start = &ixgbe_blink_led_start_X540, |
685 | .blink_led_stop = &ixgbe_blink_led_stop_generic, | 884 | .blink_led_stop = &ixgbe_blink_led_stop_X540, |
686 | .set_rar = &ixgbe_set_rar_generic, | 885 | .set_rar = &ixgbe_set_rar_generic, |
687 | .clear_rar = &ixgbe_clear_rar_generic, | 886 | .clear_rar = &ixgbe_clear_rar_generic, |
688 | .set_vmdq = &ixgbe_set_vmdq_generic, | 887 | .set_vmdq = &ixgbe_set_vmdq_generic, |
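Editor's note: the two new LED routines force link and speed in the MAC before setting the blink bit and undo that when blinking stops. A hedged usage sketch of how port-identification code might drive them through the mac ops table; the helper name and fixed duration are illustrative, not taken from this commit.

#include <linux/delay.h>

/* Hypothetical port-identify helper: blink the given LED for "seconds". */
static void x540_identify_port(struct ixgbe_hw *hw, u32 led_idx,
			       unsigned int seconds)
{
	hw->mac.ops.blink_led_start(hw, led_idx);	/* forces link + blink */
	msleep(seconds * 1000);
	hw->mac.ops.blink_led_stop(hw, led_idx);	/* restores LED and MAC */
}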
@@ -705,9 +904,11 @@ static struct ixgbe_mac_operations mac_ops_X540 = { | |||
705 | static struct ixgbe_eeprom_operations eeprom_ops_X540 = { | 904 | static struct ixgbe_eeprom_operations eeprom_ops_X540 = { |
706 | .init_params = &ixgbe_init_eeprom_params_X540, | 905 | .init_params = &ixgbe_init_eeprom_params_X540, |
707 | .read = &ixgbe_read_eerd_X540, | 906 | .read = &ixgbe_read_eerd_X540, |
907 | .read_buffer = &ixgbe_read_eerd_buffer_X540, | ||
708 | .write = &ixgbe_write_eewr_X540, | 908 | .write = &ixgbe_write_eewr_X540, |
909 | .write_buffer = &ixgbe_write_eewr_buffer_X540, | ||
709 | .calc_checksum = &ixgbe_calc_eeprom_checksum_X540, | 910 | .calc_checksum = &ixgbe_calc_eeprom_checksum_X540, |
710 | .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, | 911 | .validate_checksum = &ixgbe_validate_eeprom_checksum_X540, |
711 | .update_checksum = &ixgbe_update_eeprom_checksum_X540, | 912 | .update_checksum = &ixgbe_update_eeprom_checksum_X540, |
712 | }; | 913 | }; |
713 | 914 | ||
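Editor's note: with the ops tables updated above, device-agnostic code never calls the X540 routines directly; it dispatches through hw->mac.ops and hw->eeprom.ops, so the X540 overrides (start_hw, the blink routines, the semaphore-aware EEPROM accessors) slot in transparently. A minimal dispatch sketch, assuming the ops tables have already been copied into the hw structure during probe; bring_up_nic is a hypothetical caller.

/*
 * Sketch only: on X540 the calls below resolve to ixgbe_start_hw_X540
 * and ixgbe_validate_eeprom_checksum_X540 via the tables above.
 */
static s32 bring_up_nic(struct ixgbe_hw *hw)
{
	s32 status;

	status = hw->mac.ops.start_hw(hw);
	if (status != 0)
		return status;

	/* NULL: the caller does not need the computed checksum value */
	return hw->eeprom.ops.validate_checksum(hw, NULL);
}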