author     David S. Miller <davem@davemloft.net>  2014-12-05 23:50:54 -0500
committer  David S. Miller <davem@davemloft.net>  2014-12-05 23:50:54 -0500
commit     ddd5c50f9bec7ffab5d28c5dd244db8a4c3f27e7 (patch)
tree       3a9b20aae9e01f6486a438a192b63fc240a20b5b
parent     d8febb77b52ebddb9bd03ccaa5b61005e3a45a85 (diff)
parent     0333464f5f7c33965173893b2a7322da6fe751ab (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-12-05

This series contains updates to ixgbe and ixgbevf.

Alex provides a couple of patches to clean up ixgbe. The first cleans up
the page reuse code, getting it into a state where all the needed
workarounds are in place, and fixes a few minor oversights such as using
__free_pages instead of put_page to drop a locally allocated page. The
second cleans up the tail writes for the ixgbe descriptor queues.

Mark Peterson adds support for looking up MAC addresses in Open Firmware
or the IDPROM.

Emil provides patches for ixgbe and ixgbevf to fix an issue on rmmod and
to add support for X550 in the VF driver. The first removes the read/write
operations on the CIAA/D registers, since they can block access to the PCI
config space, and uses the standard kernel functions for accessing the PCI
config space instead. The second fixes an issue where the driver has logic
to free up used data when any of the checks in ixgbe_probe() fail, but a
similar set of cleanups can also occur on driver unload in ixgbe_remove(),
which can cause the rmmod command to crash.

Don provides the remaining patches in the series, completing the addition
of X550 support to the ixgbe driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
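As a quick orientation for the Open Firmware / IDPROM change mentioned above, the lookup boils down to the pattern sketched below. This is a condensed, illustrative rendering of the ixgbe_get_platform_mac_addr() helper added to ixgbe_main.c later in this diff, not a separate implementation; the adapter and hw types are the driver's own.

#include <linux/etherdevice.h>
#ifdef CONFIG_OF
#include <linux/of_net.h>
#endif
#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_OF
	struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
	struct ixgbe_hw *hw = &adapter->hw;
	const unsigned char *addr;

	/* prefer a MAC address supplied by the device tree, if present */
	addr = of_get_mac_address(dp);
	if (addr) {
		ether_addr_copy(hw->mac.perm_addr, addr);
		return;
	}
#endif
#ifdef CONFIG_SPARC
	/* otherwise fall back to the factory address in the SPARC IDPROM */
	ether_addr_copy(adapter->hw.mac.perm_addr, idprom->id_ethaddr);
#endif
}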
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile          |    2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h           |   45
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c    |  133
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h    |    8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c   |    9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c      |  403
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c       |  214
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h       |   10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c     |    3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h      |  284
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c      |   85
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h      |   39
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c      | 1432
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h       |    2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h       |    4
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  |   42
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c            |   10
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h            |    2
18 files changed, 2404 insertions(+), 323 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index be2989e60009..35e6fa643c7e 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
+              ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
                              ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 5032a602d5c9..b6137be43920 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -300,16 +300,17 @@ enum ixgbe_ring_f_enum {
 	RING_F_ARRAY_SIZE /* must be last in enum set */
 };
 
 #define IXGBE_MAX_RSS_INDICES 16
+#define IXGBE_MAX_RSS_INDICES_X550 64
 #define IXGBE_MAX_VMDQ_INDICES 64
 #define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
 #define IXGBE_MAX_FCOE_INDICES 8
 #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
 #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
 #define IXGBE_MAX_L2A_QUEUES 4
 #define IXGBE_BAD_L2A_QUEUE 3
 #define IXGBE_MAX_MACVLANS 31
 #define IXGBE_MAX_DCBMACVLANS 8
 
 struct ixgbe_ring_feature {
 	u16 limit;	/* upper limit on feature indices */
@@ -553,11 +554,6 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
 	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 }
 
-static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
-{
-	writel(value, ring->tail);
-}
-
 #define IXGBE_RX_DESC(R, i)	    \
 	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
 #define IXGBE_TX_DESC(R, i)	    \
@@ -769,6 +765,21 @@ struct ixgbe_adapter {
 	unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
 };
 
+static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
+{
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		return IXGBE_MAX_RSS_INDICES;
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		return IXGBE_MAX_RSS_INDICES_X550;
+	default:
+		return 0;
+	}
+}
+
 struct ixgbe_fdir_filter {
 	struct hlist_node fdir_node;
 	union ixgbe_atr_input filter;
@@ -804,11 +815,15 @@ enum ixgbe_boards {
 	board_82598,
 	board_82599,
 	board_X540,
+	board_X550,
+	board_X550EM_x,
 };
 
 extern struct ixgbe_info ixgbe_82598_info;
 extern struct ixgbe_info ixgbe_82599_info;
 extern struct ixgbe_info ixgbe_X540_info;
+extern struct ixgbe_info ixgbe_X550_info;
+extern struct ixgbe_info ixgbe_X550EM_x_info;
 #ifdef CONFIG_IXGBE_DCB
 extern const struct dcbnl_rtnl_ops dcbnl_ops;
 #endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 0e754b4c4220..9c66babd4edd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1625,7 +1625,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
  * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
  * @hw: pointer to hardware structure
  **/
-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
 {
 	u16 i;
 	u16 j;
@@ -1636,7 +1636,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
 
 	/* Include 0x0-0x3F in the checksum */
 	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
-		if (hw->eeprom.ops.read(hw, i, &word) != 0) {
+		if (hw->eeprom.ops.read(hw, i, &word)) {
 			hw_dbg(hw, "EEPROM read failed\n");
 			break;
 		}
@@ -1645,24 +1645,35 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
 
 	/* Include all data from pointers except for the fw pointer */
 	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
-		hw->eeprom.ops.read(hw, i, &pointer);
+		if (hw->eeprom.ops.read(hw, i, &pointer)) {
+			hw_dbg(hw, "EEPROM read failed\n");
+			return IXGBE_ERR_EEPROM;
+		}
+
+		/* If the pointer seems invalid */
+		if (pointer == 0xFFFF || pointer == 0)
+			continue;
 
-		/* Make sure the pointer seems valid */
-		if (pointer != 0xFFFF && pointer != 0) {
-			hw->eeprom.ops.read(hw, pointer, &length);
+		if (hw->eeprom.ops.read(hw, pointer, &length)) {
+			hw_dbg(hw, "EEPROM read failed\n");
+			return IXGBE_ERR_EEPROM;
+		}
+
+		if (length == 0xFFFF || length == 0)
+			continue;
 
-			if (length != 0xFFFF && length != 0) {
-				for (j = pointer+1; j <= pointer+length; j++) {
-					hw->eeprom.ops.read(hw, j, &word);
-					checksum += word;
-				}
+		for (j = pointer + 1; j <= pointer + length; j++) {
+			if (hw->eeprom.ops.read(hw, j, &word)) {
+				hw_dbg(hw, "EEPROM read failed\n");
+				return IXGBE_ERR_EEPROM;
 			}
+			checksum += word;
 		}
 	}
 
 	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
 
-	return checksum;
+	return (s32)checksum;
 }
 
 /**
@@ -1686,26 +1697,33 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
 	 * EEPROM read fails
 	 */
 	status = hw->eeprom.ops.read(hw, 0, &checksum);
+	if (status) {
+		hw_dbg(hw, "EEPROM read failed\n");
+		return status;
+	}
 
-	if (status == 0) {
-		checksum = hw->eeprom.ops.calc_checksum(hw);
-
-		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+	status = hw->eeprom.ops.calc_checksum(hw);
+	if (status < 0)
+		return status;
 
-		/*
-		 * Verify read checksum from EEPROM is the same as
-		 * calculated checksum
-		 */
-		if (read_checksum != checksum)
-			status = IXGBE_ERR_EEPROM_CHECKSUM;
+	checksum = (u16)(status & 0xffff);
 
-		/* If the user cares, return the calculated checksum */
-		if (checksum_val)
-			*checksum_val = checksum;
-	} else {
+	status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+	if (status) {
 		hw_dbg(hw, "EEPROM read failed\n");
+		return status;
 	}
 
+	/* Verify read checksum from EEPROM is the same as
+	 * calculated checksum
+	 */
+	if (read_checksum != checksum)
+		status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+	/* If the user cares, return the calculated checksum */
+	if (checksum_val)
+		*checksum_val = checksum;
+
 	return status;
 }
 
@@ -1724,15 +1742,19 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
 	 * EEPROM read fails
 	 */
 	status = hw->eeprom.ops.read(hw, 0, &checksum);
-
-	if (status == 0) {
-		checksum = hw->eeprom.ops.calc_checksum(hw);
-		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
-					      checksum);
-	} else {
+	if (status) {
 		hw_dbg(hw, "EEPROM read failed\n");
+		return status;
 	}
 
+	status = hw->eeprom.ops.calc_checksum(hw);
+	if (status < 0)
+		return status;
+
+	checksum = (u16)(status & 0xffff);
+
+	status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+
 	return status;
 }
 
@@ -2469,7 +2491,7 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
  * Acquires the SWFW semaphore through the GSSR register for the specified
  * function (CSR, PHY0, PHY1, EEPROM, Flash)
  **/
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
 {
 	u32 gssr = 0;
 	u32 swmask = mask;
@@ -2514,7 +2536,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
  * Releases the SWFW semaphore through the GSSR register for the specified
  * function (CSR, PHY0, PHY1, EEPROM, Flash)
  **/
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
 {
 	u32 gssr;
 	u32 swmask = mask;
@@ -3446,23 +3468,34 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
  * @buffer: contains the command to write and where the return status will
  *  be placed
  * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (true) or not (false)
+ *  Needed because FW structures are big endian and decoding of
+ *  these fields can be 8 bit or 16 bit based on command. Decoding
+ *  is not easily understood without making a table of commands.
+ *  So we will leave this up to the caller to read back the data
+ *  in these cases.
  *
  * Communicates with the manageability block. On success return 0
  * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
  **/
-static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
-					u32 length)
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+				 u32 length, u32 timeout,
+				 bool return_data)
 {
-	u32 hicr, i, bi;
+	u32 hicr, i, bi, fwsts;
 	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
-	u8 buf_len, dword_len;
+	u16 buf_len, dword_len;
 
-	if (length == 0 || length & 0x3 ||
-	    length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
-		hw_dbg(hw, "Buffer length failure.\n");
+	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+		hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
 	}
 
+	/* Set bit 9 of FWSTS clearing FW reset indication */
+	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
+	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
+
 	/* Check that the host interface is enabled. */
 	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
 	if ((hicr & IXGBE_HICR_EN) == 0) {
@@ -3470,7 +3503,12 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
 	}
 
-	/* Calculate length in DWORDs */
+	/* Calculate length in DWORDs. We must be DWORD aligned */
+	if ((length % (sizeof(u32))) != 0) {
+		hw_dbg(hw, "Buffer length failure, not aligned to dword");
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
 	dword_len = length >> 2;
 
 	/*
@@ -3484,7 +3522,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
 	/* Setting this bit tells the ARC that a new command is pending. */
 	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
 
-	for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+	for (i = 0; i < timeout; i++) {
 		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
 		if (!(hicr & IXGBE_HICR_C))
 			break;
@@ -3492,12 +3530,15 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
 	}
 
 	/* Check command successful completion. */
-	if (i == IXGBE_HI_COMMAND_TIMEOUT ||
+	if ((timeout != 0 && i == timeout) ||
 	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
 		hw_dbg(hw, "Command has failed with no status valid.\n");
 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
 	}
 
+	if (!return_data)
+		return 0;
+
 	/* Calculate length in DWORDs */
 	dword_len = hdr_size >> 2;
 
@@ -3568,7 +3609,9 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
 
 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
 		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
-						       sizeof(fw_cmd));
+						       sizeof(fw_cmd),
+						       IXGBE_HI_COMMAND_TIMEOUT,
+						       true);
 		if (ret_val != 0)
 			continue;
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2ae5d4b8fc93..8cfadcb2676e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -64,7 +64,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
 				       u16 *data);
 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
 					      u16 words, u16 *data);
-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
 					   u16 *checksum_val);
 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
@@ -84,8 +84,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
 void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
 
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
@@ -110,6 +110,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
 				 u8 build, u8 ver);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+				 u32 length, u32 timeout, bool return_data);
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
 bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 26fd85e2bca5..e5be0dd508de 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2927,7 +2927,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
 		max_combined = IXGBE_MAX_FDIR_INDICES;
 	} else {
 		/* support up to 16 queues with RSS */
-		max_combined = IXGBE_MAX_RSS_INDICES;
+		max_combined = ixgbe_max_rss_indices(adapter);
 	}
 
 	return max_combined;
@@ -2975,6 +2975,7 @@ static int ixgbe_set_channels(struct net_device *dev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	unsigned int count = ch->combined_count;
+	u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
 
 	/* verify they are not requesting separate vectors */
 	if (!count || ch->rx_count || ch->tx_count)
@@ -2991,9 +2992,9 @@ static int ixgbe_set_channels(struct net_device *dev,
 	/* update feature limits from largest to smallest supported values */
 	adapter->ring_feature[RING_F_FDIR].limit = count;
 
-	/* cap RSS limit at 16 */
-	if (count > IXGBE_MAX_RSS_INDICES)
-		count = IXGBE_MAX_RSS_INDICES;
+	/* cap RSS limit */
+	if (count > max_rss_indices)
+		count = max_rss_indices;
 	adapter->ring_feature[RING_F_RSS].limit = count;
 
 #ifdef IXGBE_FCOE
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9afa167d52a6..82d418729dd4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -42,6 +42,7 @@
 #include <linux/slab.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
+#include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
@@ -50,6 +51,15 @@
 #include <linux/prefetch.h>
 #include <scsi/fc/fc_fcoe.h>
 
+#ifdef CONFIG_OF
+#include <linux/of_net.h>
+#endif
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+
 #include "ixgbe.h"
 #include "ixgbe_common.h"
 #include "ixgbe_dcb_82599.h"
@@ -65,15 +75,17 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
 			      "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "3.19.1-k"
+#define DRV_VERSION "4.0.1-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
 				"Copyright (c) 1999-2014 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
 	[board_82598] = &ixgbe_82598_info,
 	[board_82599] = &ixgbe_82599_info,
 	[board_X540] = &ixgbe_X540_info,
+	[board_X550] = &ixgbe_X550_info,
+	[board_X550EM_x] = &ixgbe_X550EM_x_info,
 };
 
 /* ixgbe_pci_tbl - PCI Device ID Table
@@ -115,6 +127,9 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
 	/* required last entry */
 	{0, }
 };
@@ -1416,40 +1431,21 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
-{
-	rx_ring->next_to_use = val;
-
-	/* update next to alloc since we have filled the ring */
-	rx_ring->next_to_alloc = val;
-	/*
-	 * Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-	ixgbe_write_tail(rx_ring, val);
-}
-
 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 				    struct ixgbe_rx_buffer *bi)
 {
 	struct page *page = bi->page;
-	dma_addr_t dma = bi->dma;
+	dma_addr_t dma;
 
 	/* since we are recycling buffers we should seldom need to alloc */
-	if (likely(dma))
+	if (likely(page))
 		return true;
 
 	/* alloc new page for storage */
-	if (likely(!page)) {
-		page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
-		if (unlikely(!page)) {
-			rx_ring->rx_stats.alloc_rx_page_failed++;
-			return false;
-		}
-		bi->page = page;
+	page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
 	}
 
 	/* map page for use */
@@ -1462,13 +1458,13 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 	 */
 	if (dma_mapping_error(rx_ring->dev, dma)) {
 		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
-		bi->page = NULL;
 
 		rx_ring->rx_stats.alloc_rx_page_failed++;
 		return false;
 	}
 
 	bi->dma = dma;
+	bi->page = page;
 	bi->page_offset = 0;
 
 	return true;
@@ -1512,16 +1508,28 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 			i -= rx_ring->count;
 		}
 
-		/* clear the hdr_addr for the next_to_use descriptor */
-		rx_desc->read.hdr_addr = 0;
+		/* clear the status bits for the next_to_use descriptor */
+		rx_desc->wb.upper.status_error = 0;
 
 		cleaned_count--;
 	} while (cleaned_count);
 
 	i += rx_ring->count;
 
-	if (rx_ring->next_to_use != i)
-		ixgbe_release_rx_desc(rx_ring, i);
+	if (rx_ring->next_to_use != i) {
+		rx_ring->next_to_use = i;
+
+		/* update next to alloc since we have filled the ring */
+		rx_ring->next_to_alloc = i;
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, rx_ring->tail);
+	}
 }
 
 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
@@ -1798,9 +1806,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
 	/* transfer page from old buffer to new buffer */
-	new_buff->page = old_buff->page;
-	new_buff->dma = old_buff->dma;
-	new_buff->page_offset = old_buff->page_offset;
+	*new_buff = *old_buff;
 
 	/* sync the buffer for use by the device */
 	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
@@ -1809,6 +1815,11 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 					 DMA_FROM_DEVICE);
 }
 
+static inline bool ixgbe_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
 /**
  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
@@ -1844,12 +1855,12 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-		/* we can reuse buffer as-is, just make sure it is local */
-		if (likely(page_to_nid(page) == numa_node_id()))
+		/* page is not reserved, we can reuse buffer as-is */
+		if (likely(!ixgbe_page_is_reserved(page)))
 			return true;
 
 		/* this page cannot be reused so discard it */
-		put_page(page);
+		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
 		return false;
 	}
 
@@ -1857,7 +1868,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 			rx_buffer->page_offset, size, truesize);
 
 	/* avoid re-using remote pages */
-	if (unlikely(page_to_nid(page) != numa_node_id()))
+	if (unlikely(ixgbe_page_is_reserved(page)))
 		return false;
 
 #if (PAGE_SIZE < 8192)
@@ -1867,22 +1878,19 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 
 	/* flip page offset to other buffer */
 	rx_buffer->page_offset ^= truesize;
-
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
-	 */
-	atomic_inc(&page->_count);
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
 
 	if (rx_buffer->page_offset > last_offset)
 		return false;
-
-	/* bump ref count on page before it is given to the stack */
-	get_page(page);
 #endif
 
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
+	 */
+	atomic_inc(&page->_count);
+
 	return true;
 }
 
@@ -1945,6 +1953,8 @@ dma_sync:
 					      rx_buffer->page_offset,
 					      ixgbe_rx_bufsz(rx_ring),
 					      DMA_FROM_DEVICE);
+
+		rx_buffer->skb = NULL;
 	}
 
 	/* pull page into skb */
@@ -1962,8 +1972,6 @@ dma_sync:
 	}
 
 	/* clear contents of buffer_info */
-	rx_buffer->skb = NULL;
-	rx_buffer->dma = 0;
 	rx_buffer->page = NULL;
 
 	return skb;
@@ -3214,7 +3222,9 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 reta = 0;
 	int i, j;
+	int reta_entries = 128;
 	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+	int indices_multi;
 
 	/*
 	 * Program table for at least 2 queues w/ SR-IOV so that VFs can
@@ -3228,22 +3238,67 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed)
 	for (i = 0; i < 10; i++)
 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
 
+	/* Fill out the redirection table as follows:
+	 * 82598: 128 (8 bit wide) entries containing pair of 4 bit RSS indices
+	 * 82599/X540: 128 (8 bit wide) entries containing 4 bit RSS index
+	 * X550: 512 (8 bit wide) entries containing 6 bit RSS index
+	 */
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+		indices_multi = 0x11;
+	else
+		indices_multi = 0x1;
+
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+			reta_entries = 512;
+	default:
+		break;
+	}
+
 	/* Fill out redirection table */
-	for (i = 0, j = 0; i < 128; i++, j++) {
+	for (i = 0, j = 0; i < reta_entries; i++, j++) {
 		if (j == rss_i)
 			j = 0;
-		/* reta = 4-byte sliding window of
-		 * 0x00..(indices-1)(indices-1)00..etc. */
-		reta = (reta << 8) | (j * 0x11);
+		reta = (reta << 8) | (j * indices_multi);
+		if ((i & 3) == 3) {
+			if (i < 128)
+				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+			else
+				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
+						reta);
+		}
+	}
+}
+
+static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter, const u32 *seed)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 vfreta = 0;
+	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+	unsigned int pf_pool = adapter->num_vfs;
+	int i, j;
+
+	/* Fill out hash function seeds */
+	for (i = 0; i < 10; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), seed[i]);
+
+	/* Fill out the redirection table */
+	for (i = 0, j = 0; i < 64; i++, j++) {
+		if (j == rss_i)
+			j = 0;
+		vfreta = (vfreta << 8) | j;
 		if ((i & 3) == 3)
-			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+			IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
+					vfreta);
 	}
 }
 
 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 mrqc = 0, rss_field = 0;
+	u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
 	u32 rss_key[10];
 	u32 rxcsum;
 
@@ -3289,9 +3344,24 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 		rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
 
 	netdev_rss_key_fill(rss_key, sizeof(rss_key));
-	ixgbe_setup_reta(adapter, rss_key);
-	mrqc |= rss_field;
-	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+	if ((hw->mac.type >= ixgbe_mac_X550) &&
+	    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
+		unsigned int pf_pool = adapter->num_vfs;
+
+		/* Enable VF RSS mode */
+		mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
+		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+		/* Setup RSS through the VF registers */
+		ixgbe_setup_vfreta(adapter, rss_key);
+		vfmrqc = IXGBE_MRQC_RSSEN;
+		vfmrqc |= rss_field;
+		IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
+	} else {
+		ixgbe_setup_reta(adapter, rss_key);
+		mrqc |= rss_field;
+		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+	}
 }
 
 /**
@@ -4344,29 +4414,26 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
-		struct ixgbe_rx_buffer *rx_buffer;
+		struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
 
-		rx_buffer = &rx_ring->rx_buffer_info[i];
 		if (rx_buffer->skb) {
 			struct sk_buff *skb = rx_buffer->skb;
-			if (IXGBE_CB(skb)->page_released) {
+			if (IXGBE_CB(skb)->page_released)
 				dma_unmap_page(dev,
 					       IXGBE_CB(skb)->dma,
 					       ixgbe_rx_bufsz(rx_ring),
 					       DMA_FROM_DEVICE);
-				IXGBE_CB(skb)->page_released = false;
-			}
 			dev_kfree_skb(skb);
 			rx_buffer->skb = NULL;
 		}
-		if (rx_buffer->dma)
-			dma_unmap_page(dev, rx_buffer->dma,
-				       ixgbe_rx_pg_size(rx_ring),
-				       DMA_FROM_DEVICE);
-		rx_buffer->dma = 0;
-		if (rx_buffer->page)
-			__free_pages(rx_buffer->page,
-				     ixgbe_rx_pg_order(rx_ring));
+
+		if (!rx_buffer->page)
+			continue;
+
+		dma_unmap_page(dev, rx_buffer->dma,
+			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+		__free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+
 		rx_buffer->page = NULL;
 	}
 
@@ -5056,7 +5123,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	hw->subsystem_device_id = pdev->subsystem_device;
 
 	/* Set common capability flags and settings */
-	rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+	rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
 	adapter->ring_feature[RING_F_RSS].limit = rss;
 	adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
 	adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -6318,6 +6385,66 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
 	}
 }
 
+#ifdef CONFIG_PCI_IOV
+static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
+				      struct pci_dev *vfdev)
+{
+	if (!pci_wait_for_pending_transaction(vfdev))
+		e_dev_warn("Issuing VFLR with pending transactions\n");
+
+	e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
+	pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+
+	msleep(100);
+}
+
+static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	struct pci_dev *vfdev;
+	u32 gpc;
+	int pos;
+	unsigned short vf_id;
+
+	if (!(netif_carrier_ok(adapter->netdev)))
+		return;
+
+	gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
+	if (gpc) /* If incrementing then no need for the check below */
+		return;
+	/* Check to see if a bad DMA write target from an errant or
+	 * malicious VF has caused a PCIe error.  If so then we can
+	 * issue a VFLR to the offending VF(s) and then resume without
+	 * requesting a full slot reset.
+	 */
+
+	if (!pdev)
+		return;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+	if (!pos)
+		return;
+
+	/* get the device ID for the VF */
+	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
+
+	/* check status reg for all VFs owned by this PF */
+	vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
+	while (vfdev) {
+		if (vfdev->is_virtfn && (vfdev->physfn == pdev)) {
+			u16 status_reg;
+
+			pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
+			if (status_reg & PCI_STATUS_REC_MASTER_ABORT)
+				/* issue VFLR */
+				ixgbe_issue_vf_flr(adapter, vfdev);
+		}
+
+		vfdev = pci_get_device(pdev->vendor, vf_id, vfdev);
+	}
+}
+
 static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
 {
 	u32 ssvpc;
@@ -6338,6 +6465,17 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
 
 	e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
 }
+#else
+static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
+{
+}
+
+static void
+ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
+{
+}
+#endif /* CONFIG_PCI_IOV */
+
 
 /**
  * ixgbe_watchdog_subtask - check and bring link up
@@ -6358,6 +6496,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
 	else
 		ixgbe_watchdog_link_is_down(adapter);
 
+	ixgbe_check_for_bad_vf(adapter);
 	ixgbe_spoof_check(adapter);
 	ixgbe_update_stats(adapter);
 
@@ -6469,51 +6608,6 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
 	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
 }
 
-#ifdef CONFIG_PCI_IOV
-static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
-{
-	int vf;
-	struct ixgbe_hw *hw = &adapter->hw;
-	struct net_device *netdev = adapter->netdev;
-	u32 gpc;
-	u32 ciaa, ciad;
-
-	gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
-	if (gpc) /* If incrementing then no need for the check below */
-		return;
-	/*
-	 * Check to see if a bad DMA write target from an errant or
-	 * malicious VF has caused a PCIe error.  If so then we can
-	 * issue a VFLR to the offending VF(s) and then resume without
-	 * requesting a full slot reset.
-	 */
-
-	for (vf = 0; vf < adapter->num_vfs; vf++) {
-		ciaa = (vf << 16) | 0x80000000;
-		/* 32 bit read so align, we really want status at offset 6 */
-		ciaa |= PCI_COMMAND;
-		IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
-		ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_BY_MAC(hw));
-		ciaa &= 0x7FFFFFFF;
-		/* disable debug mode asap after reading data */
-		IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
-		/* Get the upper 16 bits which will be the PCI status reg */
-		ciad >>= 16;
-		if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
-			netdev_err(netdev, "VF %d Hung DMA\n", vf);
-			/* Issue VFLR */
-			ciaa = (vf << 16) | 0x80000000;
-			ciaa |= 0xA8;
-			IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
-			ciad = 0x00008000;  /* VFLR */
-			IXGBE_WRITE_REG(hw, IXGBE_CIAD_BY_MAC(hw), ciad);
-			ciaa &= 0x7FFFFFFF;
-			IXGBE_WRITE_REG(hw, IXGBE_CIAA_BY_MAC(hw), ciaa);
-		}
-	}
-}
-
-#endif
 /**
  * ixgbe_service_timer - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
@@ -6522,7 +6616,6 @@ static void ixgbe_service_timer(unsigned long data)
 {
 	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
 	unsigned long next_event_offset;
-	bool ready = true;
 
 	/* poll faster when waiting for link */
 	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
@@ -6530,32 +6623,10 @@ static void ixgbe_service_timer(unsigned long data)
 	else
 		next_event_offset = HZ * 2;
 
-#ifdef CONFIG_PCI_IOV
-	/*
-	 * don't bother with SR-IOV VF DMA hang check if there are
-	 * no VFs or the link is down
-	 */
-	if (!adapter->num_vfs ||
-	    (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
-		goto normal_timer_service;
-
-	/* If we have VFs allocated then we must check for DMA hangs */
-	ixgbe_check_for_bad_vf(adapter);
-	next_event_offset = HZ / 50;
-	adapter->timer_event_accumulator++;
-
-	if (adapter->timer_event_accumulator >= 100)
-		adapter->timer_event_accumulator = 0;
-	else
-		ready = false;
-
-normal_timer_service:
-#endif
 	/* Reset the timer */
 	mod_timer(&adapter->service_timer, next_event_offset + jiffies);
 
-	if (ready)
-		ixgbe_service_event_schedule(adapter);
+	ixgbe_service_event_schedule(adapter);
 }
 
 static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
@@ -6960,8 +7031,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
-		/* notify HW of packet */
-		ixgbe_write_tail(tx_ring, i);
+		writel(i, tx_ring->tail);
+
+		/* we need this if more than one processor can write to our tail
+		 * at a time, it synchronizes IO on IA64/Altix systems
+		 */
+		mmiowb();
 	}
 
 	return;
@@ -8027,6 +8102,29 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
 }
 
 /**
+ * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
+ * @adapter: Pointer to adapter struct
+ */
+static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_OF
+	struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	const unsigned char *addr;
+
+	addr = of_get_mac_address(dp);
+	if (addr) {
+		ether_addr_copy(hw->mac.perm_addr, addr);
+		return;
+	}
+#endif /* CONFIG_OF */
+
+#ifdef CONFIG_SPARC
+	ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
+#endif /* CONFIG_SPARC */
+}
+
+/**
  * ixgbe_probe - Device Initialization Routine
  * @pdev: PCI device information struct
  * @ent: entry in ixgbe_pci_tbl
@@ -8108,7 +8206,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
 	adapter = netdev_priv(netdev);
-	pci_set_drvdata(pdev, adapter);
 
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
@@ -8295,6 +8392,8 @@ skip_sriov:
 		goto err_sw_init;
 	}
 
+	ixgbe_get_platform_mac_addr(adapter);
+
 	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
 
 	if (!is_valid_ether_addr(netdev->dev_addr)) {
@@ -8386,6 +8485,8 @@ skip_sriov:
 	if (err)
 		goto err_register;
 
+	pci_set_drvdata(pdev, adapter);
+
 	/* power down the optics for 82599 SFP+ fiber */
 	if (hw->mac.ops.disable_tx_laser)
 		hw->mac.ops.disable_tx_laser(hw);
@@ -8465,9 +8566,14 @@ err_dma:
 static void ixgbe_remove(struct pci_dev *pdev)
 {
 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
-	struct net_device *netdev = adapter->netdev;
+	struct net_device *netdev;
 	bool disable_dev;
 
+	/* if !adapter then we already cleaned up in probe */
+	if (!adapter)
+		return;
+
+	netdev = adapter->netdev;
 	ixgbe_dbg_adapter_exit(adapter);
 
 	set_bit(__IXGBE_REMOVING, &adapter->state);
@@ -8614,8 +8720,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
 	 * VFLR.  Just clean up the AER in that case.
 	 */
 	if (vfdev) {
-		e_dev_err("Issuing VFLR to VF %d\n", vf);
-		pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+		ixgbe_issue_vf_flr(adapter, vfdev);
 		/* Free device reference count */
 		pci_dev_put(vfdev);
 	}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index acafe391f0a3..8a2be444113d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -50,6 +50,188 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
50static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); 50static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
51 51
52/** 52/**
53 * ixgbe_out_i2c_byte_ack - Send I2C byte with ack
54 * @hw: pointer to the hardware structure
55 * @byte: byte to send
56 *
57 * Returns an error code on error.
58 **/
59static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
60{
61 s32 status;
62
63 status = ixgbe_clock_out_i2c_byte(hw, byte);
64 if (status)
65 return status;
66 return ixgbe_get_i2c_ack(hw);
67}
68
69/**
70 * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack
71 * @hw: pointer to the hardware structure
72 * @byte: pointer to a u8 to receive the byte
73 *
74 * Returns an error code on error.
75 **/
76static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
77{
78 s32 status;
79
80 status = ixgbe_clock_in_i2c_byte(hw, byte);
81 if (status)
82 return status;
83 /* ACK */
84 return ixgbe_clock_out_i2c_bit(hw, false);
85}
86
87/**
88 * ixgbe_ones_comp_byte_add - Perform one's complement addition
89 * @add1: addend 1
90 * @add2: addend 2
91 *
92 * Returns one's complement 8-bit sum.
93 **/
94static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
95{
96 u16 sum = add1 + add2;
97
98 sum = (sum & 0xFF) + (sum >> 8);
99 return sum & 0xFF;
100}
101
102/**
103 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
104 * @hw: pointer to the hardware structure
105 * @addr: I2C bus address to read from
106 * @reg: I2C device register to read from
107 * @val: pointer to location to receive read value
108 *
109 * Returns an error code on error.
110 **/
111s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
112 u16 reg, u16 *val)
113{
114 u32 swfw_mask = hw->phy.phy_semaphore_mask;
115 int max_retry = 10;
116 int retry = 0;
117 u8 csum_byte;
118 u8 high_bits;
119 u8 low_bits;
120 u8 reg_high;
121 u8 csum;
122
123 reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */
124 csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
125 csum = ~csum;
126 do {
127 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
128 return IXGBE_ERR_SWFW_SYNC;
129 ixgbe_i2c_start(hw);
130 /* Device Address and write indication */
131 if (ixgbe_out_i2c_byte_ack(hw, addr))
132 goto fail;
133 /* Write bits 14:8 */
134 if (ixgbe_out_i2c_byte_ack(hw, reg_high))
135 goto fail;
136 /* Write bits 7:0 */
137 if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
138 goto fail;
139 /* Write csum */
140 if (ixgbe_out_i2c_byte_ack(hw, csum))
141 goto fail;
142 /* Re-start condition */
143 ixgbe_i2c_start(hw);
144 /* Device Address and read indication */
145 if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
146 goto fail;
147 /* Get upper bits */
148 if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
149 goto fail;
150 /* Get low bits */
151 if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
152 goto fail;
153 /* Get csum */
154 if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
155 goto fail;
156 /* NACK */
157 if (ixgbe_clock_out_i2c_bit(hw, false))
158 goto fail;
159 ixgbe_i2c_stop(hw);
160 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
161 *val = (high_bits << 8) | low_bits;
162 return 0;
163
164fail:
165 ixgbe_i2c_bus_clear(hw);
166 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
167 retry++;
168 if (retry < max_retry)
169 hw_dbg(hw, "I2C byte read combined error - Retry.\n");
170 else
171 hw_dbg(hw, "I2C byte read combined error.\n");
172 } while (retry < max_retry);
173
174 return IXGBE_ERR_I2C;
175}
176
177/**
178 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
179 * @hw: pointer to the hardware structure
180 * @addr: I2C bus address to write to
181 * @reg: I2C device register to write to
182 * @val: value to write
183 *
184 * Returns an error code on error.
185 **/
186s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
187 u8 addr, u16 reg, u16 val)
188{
189 int max_retry = 1;
190 int retry = 0;
191 u8 reg_high;
192 u8 csum;
193
194 reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */
195 csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
196 csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
197 csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
198 csum = ~csum;
199 do {
200 ixgbe_i2c_start(hw);
201 /* Device Address and write indication */
202 if (ixgbe_out_i2c_byte_ack(hw, addr))
203 goto fail;
204 /* Write bits 14:8 */
205 if (ixgbe_out_i2c_byte_ack(hw, reg_high))
206 goto fail;
207 /* Write bits 7:0 */
208 if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
209 goto fail;
210 /* Write data 15:8 */
211 if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
212 goto fail;
213 /* Write data 7:0 */
214 if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
215 goto fail;
216 /* Write csum */
217 if (ixgbe_out_i2c_byte_ack(hw, csum))
218 goto fail;
219 ixgbe_i2c_stop(hw);
220 return 0;
221
222fail:
223 ixgbe_i2c_bus_clear(hw);
224 retry++;
225 if (retry < max_retry)
226 hw_dbg(hw, "I2C byte write combined error - Retry.\n");
227 else
228 hw_dbg(hw, "I2C byte write combined error.\n");
229 } while (retry < max_retry);
230
231 return IXGBE_ERR_I2C;
232}
233
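Editor's note: the combined read/write helpers above are exported through the new read_i2c_combined/write_i2c_combined PHY ops added to ixgbe_type.h later in this document, and appear intended for the CS4227 device used on X550EM_x SFP parts. As a hedged illustration only (not code from this patch), programming the CS4227 EDC mode for an SR module with the constants added to ixgbe_phy.h below might look roughly like this, assuming the usual driver headers and a hw struct set up for such a port:

/* Illustrative sketch only; not part of the patch. */
static s32 example_cs4227_set_edc_sr(struct ixgbe_hw *hw)
{
	/* Write EDC mode "SR" into the CS4227 SPARE24 LSB register. */
	return ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227,
						IXGBE_CS4227_SPARE24_LSB,
						IXGBE_CS4227_EDC_MODE_SR);
}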
234/**
53 * ixgbe_identify_phy_generic - Get physical layer module 235 * ixgbe_identify_phy_generic - Get physical layer module
54 * @hw: pointer to hardware structure 236 * @hw: pointer to hardware structure
55 * 237 *
@@ -60,6 +242,15 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
60 u32 phy_addr; 242 u32 phy_addr;
61 u16 ext_ability = 0; 243 u16 ext_ability = 0;
62 244
245 if (!hw->phy.phy_semaphore_mask) {
246 hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
247 IXGBE_STATUS_LAN_ID_1;
248 if (hw->phy.lan_id)
249 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
250 else
251 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
252 }
253
63 if (hw->phy.type == ixgbe_phy_unknown) { 254 if (hw->phy.type == ixgbe_phy_unknown) {
64 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { 255 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
65 hw->phy.mdio.prtad = phy_addr; 256 hw->phy.mdio.prtad = phy_addr;
@@ -315,12 +506,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
315 u32 device_type, u16 *phy_data) 506 u32 device_type, u16 *phy_data)
316{ 507{
317 s32 status; 508 s32 status;
318 u16 gssr; 509 u32 gssr = hw->phy.phy_semaphore_mask;
319
320 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
321 gssr = IXGBE_GSSR_PHY1_SM;
322 else
323 gssr = IXGBE_GSSR_PHY0_SM;
324 510
325 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { 511 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
326 status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, 512 status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
@@ -418,7 +604,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
418 u32 device_type, u16 phy_data) 604 u32 device_type, u16 phy_data)
419{ 605{
420 s32 status; 606 s32 status;
421 u16 gssr; 607 u32 gssr;
422 608
423 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) 609 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
424 gssr = IXGBE_GSSR_PHY1_SM; 610 gssr = IXGBE_GSSR_PHY1_SM;
@@ -1469,15 +1655,10 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1469 s32 status; 1655 s32 status;
1470 u32 max_retry = 10; 1656 u32 max_retry = 10;
1471 u32 retry = 0; 1657 u32 retry = 0;
1472 u16 swfw_mask = 0; 1658 u32 swfw_mask = hw->phy.phy_semaphore_mask;
1473 bool nack = true; 1659 bool nack = true;
1474 *data = 0; 1660 *data = 0;
1475 1661
1476 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1477 swfw_mask = IXGBE_GSSR_PHY1_SM;
1478 else
1479 swfw_mask = IXGBE_GSSR_PHY0_SM;
1480
1481 do { 1662 do {
1482 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) 1663 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
1483 return IXGBE_ERR_SWFW_SYNC; 1664 return IXGBE_ERR_SWFW_SYNC;
@@ -1555,12 +1736,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1555 s32 status; 1736 s32 status;
1556 u32 max_retry = 1; 1737 u32 max_retry = 1;
1557 u32 retry = 0; 1738 u32 retry = 0;
1558 u16 swfw_mask = 0; 1739 u32 swfw_mask = hw->phy.phy_semaphore_mask;
1559
1560 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1561 swfw_mask = IXGBE_GSSR_PHY1_SM;
1562 else
1563 swfw_mask = IXGBE_GSSR_PHY0_SM;
1564 1740
1565 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) 1741 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
1566 return IXGBE_ERR_SWFW_SYNC; 1742 return IXGBE_ERR_SWFW_SYNC;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 54071ed17e3b..434643881287 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -77,6 +77,11 @@
77#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 77#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
78#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 78#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
79#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 79#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
80#define IXGBE_CS4227 0xBE /* CS4227 address */
81#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */
82#define IXGBE_CS4227_EDC_MODE_CX1 0x0002
83#define IXGBE_CS4227_EDC_MODE_SR 0x0004
84
80/* Flow control defines */ 85/* Flow control defines */
81#define IXGBE_TAF_SYM_PAUSE 0x400 86#define IXGBE_TAF_SYM_PAUSE 0x400
82#define IXGBE_TAF_ASM_PAUSE 0x800 87#define IXGBE_TAF_ASM_PAUSE 0x800
@@ -110,7 +115,6 @@
110/* SFP+ SFF-8472 Compliance code */ 115/* SFP+ SFF-8472 Compliance code */
111#define IXGBE_SFF_SFF_8472_UNSUP 0x00 116#define IXGBE_SFF_SFF_8472_UNSUP 0x00
112 117
113s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
114s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); 118s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
115s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); 119s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
116s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 120s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
@@ -157,4 +161,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
157 u8 *sff8472_data); 161 u8 *sff8472_data);
158s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, 162s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
159 u8 eeprom_data); 163 u8 eeprom_data);
164s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
165 u16 reg, u16 *val);
166s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
167 u16 reg, u16 val);
160#endif /* _IXGBE_PHY_H_ */ 168#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 04eee7c7b653..c76ba90ecc6e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -221,7 +221,8 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
221 if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { 221 if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
222 adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; 222 adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
223 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; 223 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
224 rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); 224 rss = min_t(int, ixgbe_max_rss_indices(adapter),
225 num_online_cpus());
225 } else { 226 } else {
226 rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus()); 227 rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
227 } 228 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 64de20d1de56..d101b25dc4b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -74,6 +74,17 @@
74#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 74#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
75#define IXGBE_DEV_ID_X540T1 0x1560 75#define IXGBE_DEV_ID_X540T1 0x1560
76 76
77#define IXGBE_DEV_ID_X550T 0x1563
78#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
79#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
80#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
81#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
82#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
83#define IXGBE_DEV_ID_X550_VF_HV 0x1564
84#define IXGBE_DEV_ID_X550_VF 0x1565
85#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
86#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
87
77/* VF Device IDs */ 88/* VF Device IDs */
78#define IXGBE_DEV_ID_82599_VF 0x10ED 89#define IXGBE_DEV_ID_82599_VF 0x10ED
79#define IXGBE_DEV_ID_X540_VF 0x1515 90#define IXGBE_DEV_ID_X540_VF 0x1515
@@ -297,6 +308,7 @@ struct ixgbe_thermal_sensor_data {
297#define IXGBE_IMIRVP 0x05AC0 308#define IXGBE_IMIRVP 0x05AC0
298#define IXGBE_VMD_CTL 0x0581C 309#define IXGBE_VMD_CTL 0x0581C
299#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ 310#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
311#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */
300#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ 312#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
301 313
302/* Registers for setting up RSS on X550 with SRIOV 314/* Registers for setting up RSS on X550 with SRIOV
@@ -740,6 +752,24 @@ struct ixgbe_thermal_sensor_data {
740#define IXGBE_LDPCECL 0x0E820 752#define IXGBE_LDPCECL 0x0E820
741#define IXGBE_LDPCECH 0x0E821 753#define IXGBE_LDPCECH 0x0E821
742 754
755/* MII clause 22/28 definitions */
756#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
757
758#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register */
759#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */
760
761#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */
762
763#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */
764#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
765#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
766#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */
767#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s F Duplex */
768#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */
769#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
770#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
771#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
772
743/* Management */ 773/* Management */
744#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ 774#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
745#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ 775#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -1141,6 +1171,13 @@ struct ixgbe_thermal_sensor_data {
1141 1171
1142/* MDIO definitions */ 1172/* MDIO definitions */
1143 1173
1174#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
1175#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
1176#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
1177#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
1178#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
1179#define IXGBE_TWINAX_DEV 1
1180
1144#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ 1181#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
1145 1182
1146#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ 1183#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
@@ -1150,9 +1187,23 @@ struct ixgbe_thermal_sensor_data {
1150#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 1187#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018
1151#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 1188#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010
1152 1189
1153#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ 1190#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
1154#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ 1191#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
1155#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ 1192#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
1193#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
1194#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
1195#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
1196
1197#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
1198#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
1199#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
1200#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */
1201
1202#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
1203#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
1204#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */
1205#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */
1206#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */
1156 1207
1157/* MII clause 22/28 definitions */ 1208/* MII clause 22/28 definitions */
1158#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ 1209#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
@@ -1696,12 +1747,14 @@ enum {
1696#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ 1747#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
1697 1748
1698/* SW_FW_SYNC/GSSR definitions */ 1749/* SW_FW_SYNC/GSSR definitions */
1699#define IXGBE_GSSR_EEP_SM 0x0001 1750#define IXGBE_GSSR_EEP_SM 0x0001
1700#define IXGBE_GSSR_PHY0_SM 0x0002 1751#define IXGBE_GSSR_PHY0_SM 0x0002
1701#define IXGBE_GSSR_PHY1_SM 0x0004 1752#define IXGBE_GSSR_PHY1_SM 0x0004
1702#define IXGBE_GSSR_MAC_CSR_SM 0x0008 1753#define IXGBE_GSSR_MAC_CSR_SM 0x0008
1703#define IXGBE_GSSR_FLASH_SM 0x0010 1754#define IXGBE_GSSR_FLASH_SM 0x0010
1704#define IXGBE_GSSR_SW_MNG_SM 0x0400 1755#define IXGBE_GSSR_SW_MNG_SM 0x0400
1756#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */
1757#define IXGBE_GSSR_I2C_MASK 0x1800
1705 1758
1706/* FW Status register bitmask */ 1759/* FW Status register bitmask */
1707#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ 1760#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
@@ -1735,27 +1788,32 @@ enum {
1735#define IXGBE_PBANUM_LENGTH 11 1788#define IXGBE_PBANUM_LENGTH 11
1736 1789
1737/* Checksum and EEPROM pointers */ 1790/* Checksum and EEPROM pointers */
1738#define IXGBE_PBANUM_PTR_GUARD 0xFAFA 1791#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
1739#define IXGBE_EEPROM_CHECKSUM 0x3F 1792#define IXGBE_EEPROM_CHECKSUM 0x3F
1740#define IXGBE_EEPROM_SUM 0xBABA 1793#define IXGBE_EEPROM_SUM 0xBABA
1741#define IXGBE_PCIE_ANALOG_PTR 0x03 1794#define IXGBE_PCIE_ANALOG_PTR 0x03
1742#define IXGBE_ATLAS0_CONFIG_PTR 0x04 1795#define IXGBE_ATLAS0_CONFIG_PTR 0x04
1743#define IXGBE_PHY_PTR 0x04 1796#define IXGBE_PHY_PTR 0x04
1744#define IXGBE_ATLAS1_CONFIG_PTR 0x05 1797#define IXGBE_ATLAS1_CONFIG_PTR 0x05
1745#define IXGBE_OPTION_ROM_PTR 0x05 1798#define IXGBE_OPTION_ROM_PTR 0x05
1746#define IXGBE_PCIE_GENERAL_PTR 0x06 1799#define IXGBE_PCIE_GENERAL_PTR 0x06
1747#define IXGBE_PCIE_CONFIG0_PTR 0x07 1800#define IXGBE_PCIE_CONFIG0_PTR 0x07
1748#define IXGBE_PCIE_CONFIG1_PTR 0x08 1801#define IXGBE_PCIE_CONFIG1_PTR 0x08
1749#define IXGBE_CORE0_PTR 0x09 1802#define IXGBE_CORE0_PTR 0x09
1750#define IXGBE_CORE1_PTR 0x0A 1803#define IXGBE_CORE1_PTR 0x0A
1751#define IXGBE_MAC0_PTR 0x0B 1804#define IXGBE_MAC0_PTR 0x0B
1752#define IXGBE_MAC1_PTR 0x0C 1805#define IXGBE_MAC1_PTR 0x0C
1753#define IXGBE_CSR0_CONFIG_PTR 0x0D 1806#define IXGBE_CSR0_CONFIG_PTR 0x0D
1754#define IXGBE_CSR1_CONFIG_PTR 0x0E 1807#define IXGBE_CSR1_CONFIG_PTR 0x0E
1755#define IXGBE_FW_PTR 0x0F 1808#define IXGBE_PCIE_ANALOG_PTR_X550 0x02
1756#define IXGBE_PBANUM0_PTR 0x15 1809#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000
1757#define IXGBE_PBANUM1_PTR 0x16 1810#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24
1758#define IXGBE_FREE_SPACE_PTR 0X3E 1811#define IXGBE_PCIE_CONFIG_SIZE 0x08
1812#define IXGBE_EEPROM_LAST_WORD 0x41
1813#define IXGBE_FW_PTR 0x0F
1814#define IXGBE_PBANUM0_PTR 0x15
1815#define IXGBE_PBANUM1_PTR 0x16
1816#define IXGBE_FREE_SPACE_PTR 0X3E
1759 1817
1760/* External Thermal Sensor Config */ 1818/* External Thermal Sensor Config */
1761#define IXGBE_ETS_CFG 0x26 1819#define IXGBE_ETS_CFG 0x26
@@ -2016,6 +2074,7 @@ enum {
2016#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 2074#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
2017#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 2075#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
2018#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 2076#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
2077#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000
2019#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 2078#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
2020 2079
2021#define IXGBE_FWSM_TS_ENABLED 0x1 2080#define IXGBE_FWSM_TS_ENABLED 0x1
@@ -2312,18 +2371,32 @@ enum ixgbe_fdir_pballoc_type {
2312#define IXGBE_FDIR_DROP_QUEUE 127 2371#define IXGBE_FDIR_DROP_QUEUE 127
2313 2372
2314/* Manageablility Host Interface defines */ 2373/* Manageablility Host Interface defines */
2315#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ 2374#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
2316#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ 2375#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
2317#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ 2376#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
2377#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */
2378#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */
2379#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */
2318 2380
2319/* CEM Support */ 2381/* CEM Support */
2320#define FW_CEM_HDR_LEN 0x4 2382#define FW_CEM_HDR_LEN 0x4
2321#define FW_CEM_CMD_DRIVER_INFO 0xDD 2383#define FW_CEM_CMD_DRIVER_INFO 0xDD
2322#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 2384#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
2323#define FW_CEM_CMD_RESERVED 0x0 2385#define FW_CEM_CMD_RESERVED 0x0
2324#define FW_CEM_UNUSED_VER 0x0 2386#define FW_CEM_UNUSED_VER 0x0
2325#define FW_CEM_MAX_RETRIES 3 2387#define FW_CEM_MAX_RETRIES 3
2326#define FW_CEM_RESP_STATUS_SUCCESS 0x1 2388#define FW_CEM_RESP_STATUS_SUCCESS 0x1
2389#define FW_READ_SHADOW_RAM_CMD 0x31
2390#define FW_READ_SHADOW_RAM_LEN 0x6
2391#define FW_WRITE_SHADOW_RAM_CMD 0x33
2392#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */
2393#define FW_SHADOW_RAM_DUMP_CMD 0x36
2394#define FW_SHADOW_RAM_DUMP_LEN 0
2395#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
2396#define FW_NVM_DATA_OFFSET 3
2397#define FW_MAX_READ_BUFFER_SIZE 1024
2398#define FW_DISABLE_RXEN_CMD 0xDE
2399#define FW_DISABLE_RXEN_LEN 0x1
2327 2400
2328/* Host Interface Command Structures */ 2401/* Host Interface Command Structures */
2329struct ixgbe_hic_hdr { 2402struct ixgbe_hic_hdr {
@@ -2336,6 +2409,25 @@ struct ixgbe_hic_hdr {
2336 u8 checksum; 2409 u8 checksum;
2337}; 2410};
2338 2411
2412struct ixgbe_hic_hdr2_req {
2413 u8 cmd;
2414 u8 buf_lenh;
2415 u8 buf_lenl;
2416 u8 checksum;
2417};
2418
2419struct ixgbe_hic_hdr2_rsp {
2420 u8 cmd;
2421 u8 buf_lenl;
2422 u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
2423 u8 checksum;
2424};
2425
2426union ixgbe_hic_hdr2 {
2427 struct ixgbe_hic_hdr2_req req;
2428 struct ixgbe_hic_hdr2_rsp rsp;
2429};
2430
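Editor's note: per the comment above, the version 2 response header splits the buffer length across two fields: buf_lenl carries the low byte, bits 7-5 of buf_lenh_status carry the high bits, and bits 4-0 carry the status code. A standalone sketch (plain C, example values only, not driver code) of one way to reassemble them:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Example response header bytes (arbitrary values). */
	uint8_t buf_lenl = 0x34;
	uint8_t buf_lenh_status = 0x41;	/* high len bits = 0x2, status = 0x1 */

	uint16_t buf_len = (uint16_t)((buf_lenh_status >> 5) << 8) | buf_lenl;
	uint8_t status = buf_lenh_status & 0x1F;

	assert(buf_len == 0x234);
	assert(status == 0x1);
	return 0;
}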
2339struct ixgbe_hic_drv_info { 2431struct ixgbe_hic_drv_info {
2340 struct ixgbe_hic_hdr hdr; 2432 struct ixgbe_hic_hdr hdr;
2341 u8 port_num; 2433 u8 port_num;
@@ -2347,6 +2439,32 @@ struct ixgbe_hic_drv_info {
2347 u16 pad2; /* end spacing to ensure length is mult. of dword2 */ 2439 u16 pad2; /* end spacing to ensure length is mult. of dword2 */
2348}; 2440};
2349 2441
2442/* These need to be dword aligned */
2443struct ixgbe_hic_read_shadow_ram {
2444 union ixgbe_hic_hdr2 hdr;
2445 u32 address;
2446 u16 length;
2447 u16 pad2;
2448 u16 data;
2449 u16 pad3;
2450};
2451
2452struct ixgbe_hic_write_shadow_ram {
2453 union ixgbe_hic_hdr2 hdr;
2454 u32 address;
2455 u16 length;
2456 u16 pad2;
2457 u16 data;
2458 u16 pad3;
2459};
2460
2461struct ixgbe_hic_disable_rxen {
2462 struct ixgbe_hic_hdr hdr;
2463 u8 port_number;
2464 u8 pad2;
2465 u16 pad3;
2466};
2467
2350/* Transmit Descriptor - Advanced */ 2468/* Transmit Descriptor - Advanced */
2351union ixgbe_adv_tx_desc { 2469union ixgbe_adv_tx_desc {
2352 struct { 2470 struct {
@@ -2623,6 +2741,9 @@ enum ixgbe_phy_type {
2623 ixgbe_phy_none, 2741 ixgbe_phy_none,
2624 ixgbe_phy_tn, 2742 ixgbe_phy_tn,
2625 ixgbe_phy_aq, 2743 ixgbe_phy_aq,
2744 ixgbe_phy_x550em_kr,
2745 ixgbe_phy_x550em_kx4,
2746 ixgbe_phy_x550em_ext_t,
2626 ixgbe_phy_cu_unknown, 2747 ixgbe_phy_cu_unknown,
2627 ixgbe_phy_qt, 2748 ixgbe_phy_qt,
2628 ixgbe_phy_xaui, 2749 ixgbe_phy_xaui,
@@ -2866,7 +2987,7 @@ struct ixgbe_eeprom_operations {
2866 s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); 2987 s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
2867 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); 2988 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
2868 s32 (*update_checksum)(struct ixgbe_hw *); 2989 s32 (*update_checksum)(struct ixgbe_hw *);
2869 u16 (*calc_checksum)(struct ixgbe_hw *); 2990 s32 (*calc_checksum)(struct ixgbe_hw *);
2870}; 2991};
2871 2992
2872struct ixgbe_mac_operations { 2993struct ixgbe_mac_operations {
@@ -2888,8 +3009,8 @@ struct ixgbe_mac_operations {
2888 s32 (*disable_rx_buff)(struct ixgbe_hw *); 3009 s32 (*disable_rx_buff)(struct ixgbe_hw *);
2889 s32 (*enable_rx_buff)(struct ixgbe_hw *); 3010 s32 (*enable_rx_buff)(struct ixgbe_hw *);
2890 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); 3011 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
2891 s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); 3012 s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
2892 void (*release_swfw_sync)(struct ixgbe_hw *, u16); 3013 void (*release_swfw_sync)(struct ixgbe_hw *, u32);
2893 s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); 3014 s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
2894 s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); 3015 s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
2895 3016
@@ -2935,6 +3056,11 @@ struct ixgbe_mac_operations {
2935 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); 3056 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
2936 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); 3057 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
2937 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); 3058 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
3059
3060 /* DMA Coalescing */
3061 s32 (*dmac_config)(struct ixgbe_hw *hw);
3062 s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
3063 s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
2938}; 3064};
2939 3065
2940struct ixgbe_phy_operations { 3066struct ixgbe_phy_operations {
@@ -2947,6 +3073,7 @@ struct ixgbe_phy_operations {
2947 s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); 3073 s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
2948 s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); 3074 s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
2949 s32 (*setup_link)(struct ixgbe_hw *); 3075 s32 (*setup_link)(struct ixgbe_hw *);
3076 s32 (*setup_internal_link)(struct ixgbe_hw *);
2950 s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); 3077 s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
2951 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); 3078 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
2952 s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); 3079 s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
@@ -2955,6 +3082,8 @@ struct ixgbe_phy_operations {
2955 s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); 3082 s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
2956 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); 3083 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
2957 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); 3084 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
3085 s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
3086 s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
2958 s32 (*check_overtemp)(struct ixgbe_hw *); 3087 s32 (*check_overtemp)(struct ixgbe_hw *);
2959}; 3088};
2960 3089
@@ -3007,6 +3136,8 @@ struct ixgbe_phy_info {
3007 bool sfp_setup_needed; 3136 bool sfp_setup_needed;
3008 u32 revision; 3137 u32 revision;
3009 enum ixgbe_media_type media_type; 3138 enum ixgbe_media_type media_type;
3139 u8 lan_id;
3140 u32 phy_semaphore_mask;
3010 bool reset_disable; 3141 bool reset_disable;
3011 ixgbe_autoneg_advertised autoneg_advertised; 3142 ixgbe_autoneg_advertised autoneg_advertised;
3012 enum ixgbe_smart_speed smart_speed; 3143 enum ixgbe_smart_speed smart_speed;
@@ -3113,4 +3244,71 @@ struct ixgbe_info {
3113#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 3244#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
3114#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 3245#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
3115 3246
3247#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
3248#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C))
3249#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
3250#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
3251#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00))
3252#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00))
3253#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520))
3254#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00))
3255
3256#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
3257#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
3258
3259#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
3260#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
3261#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
3262#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
3263#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
3264#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
3265#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
3266#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
3267#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
3268#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
3269#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
3270
3271#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
3272#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
3273#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
3274
3275#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4)
3276#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2)
3277
3278#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16)
3279
3280#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1)
3281#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2)
3282#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
3283#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
3284
3285#define IXGBE_KX4_LINK_CNTL_1 0x4C
3286#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16)
3287#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17)
3288#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24)
3289#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25)
3290#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29)
3291#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30)
3292#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31)
3293
3294#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
3295#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
3296
3297#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0
3298#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF
3299#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18
3300#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \
3301 (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT)
3302#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20
3303#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \
3304 (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT)
3305#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
3306#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7
3307#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
3308#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
3309#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
3310#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
3311#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
3312#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
3313
3116#endif /* _IXGBE_TYPE_H_ */ 3314#endif /* _IXGBE_TYPE_H_ */
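Editor's note on the definitions added above: the KRM register macros pick a per-port offset (port 0 registers sit in the 0x4xxx range, port 1 in 0x8xxx), and the X550 code later in this series composes the IOSF side-band command by placing the register address at IXGBE_SB_IOSF_CTRL_ADDR_SHIFT and the target select at IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT. A standalone sketch (plain C, local copies of those values, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Local copies of the definitions above, for a self-contained example. */
#define KRM_LINK_CTRL_1(P)			(((P) == 0) ? 0x420C : 0x820C)
#define SB_IOSF_CTRL_ADDR_SHIFT			0
#define SB_IOSF_CTRL_TARGET_SELECT_SHIFT	28
#define SB_IOSF_TARGET_KX4_PCS0			2

int main(void)
{
	uint32_t reg_addr = 0x1C;	/* arbitrary example register */
	uint32_t command;

	/* The port argument selects the base: 0x4xxx vs. 0x8xxx. */
	assert(KRM_LINK_CTRL_1(0) == 0x420C);
	assert(KRM_LINK_CTRL_1(1) == 0x820C);

	/* Compose an IOSF indirect command word for the KX4 PCS0 target. */
	command = (reg_addr << SB_IOSF_CTRL_ADDR_SHIFT) |
		  ((uint32_t)SB_IOSF_TARGET_KX4_PCS0 <<
		   SB_IOSF_CTRL_TARGET_SELECT_SHIFT);
	assert(command == 0x2000001CU);
	return 0;
}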
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index e88305d5d18d..ba54ff07b438 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -32,6 +32,7 @@
32 32
33#include "ixgbe.h" 33#include "ixgbe.h"
34#include "ixgbe_phy.h" 34#include "ixgbe_phy.h"
35#include "ixgbe_x540.h"
35 36
36#define IXGBE_X540_MAX_TX_QUEUES 128 37#define IXGBE_X540_MAX_TX_QUEUES 128
37#define IXGBE_X540_MAX_RX_QUEUES 128 38#define IXGBE_X540_MAX_RX_QUEUES 128
@@ -42,17 +43,15 @@
42 43
43static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); 44static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
44static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); 45static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
45static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
46static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
47static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); 46static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
48static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); 47static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
49 48
50static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) 49enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
51{ 50{
52 return ixgbe_media_type_copper; 51 return ixgbe_media_type_copper;
53} 52}
54 53
55static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) 54s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
56{ 55{
57 struct ixgbe_mac_info *mac = &hw->mac; 56 struct ixgbe_mac_info *mac = &hw->mac;
58 57
@@ -76,9 +75,8 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
76 * @speed: new link speed 75 * @speed: new link speed
77 * @autoneg_wait_to_complete: true when waiting for completion is needed 76 * @autoneg_wait_to_complete: true when waiting for completion is needed
78 **/ 77 **/
79static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, 78s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
80 ixgbe_link_speed speed, 79 bool autoneg_wait_to_complete)
81 bool autoneg_wait_to_complete)
82{ 80{
83 return hw->phy.ops.setup_link_speed(hw, speed, 81 return hw->phy.ops.setup_link_speed(hw, speed,
84 autoneg_wait_to_complete); 82 autoneg_wait_to_complete);
@@ -92,7 +90,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
92 * and clears all interrupts, perform a PHY reset, and perform a link (MAC) 90 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
93 * reset. 91 * reset.
94 **/ 92 **/
95static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) 93s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
96{ 94{
97 s32 status; 95 s32 status;
98 u32 ctrl, i; 96 u32 ctrl, i;
@@ -179,7 +177,7 @@ mac_reset_top:
179 * and the generation start_hw function. 177 * and the generation start_hw function.
180 * Then performs revision-specific operations, if any. 178 * Then performs revision-specific operations, if any.
181 **/ 179 **/
182static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) 180s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
183{ 181{
184 s32 ret_val; 182 s32 ret_val;
185 183
@@ -197,7 +195,7 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
197 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 195 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
198 * ixgbe_hw struct in order to set up EEPROM access. 196 * ixgbe_hw struct in order to set up EEPROM access.
199 **/ 197 **/
200static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) 198s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
201{ 199{
202 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 200 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
203 u32 eec; 201 u32 eec;
@@ -316,7 +314,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
316 * 314 *
317 * @hw: pointer to hardware structure 315 * @hw: pointer to hardware structure
318 **/ 316 **/
319static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) 317static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
320{ 318{
321 u16 i; 319 u16 i;
322 u16 j; 320 u16 j;
@@ -324,6 +322,8 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
324 u16 length = 0; 322 u16 length = 0;
325 u16 pointer = 0; 323 u16 pointer = 0;
326 u16 word = 0; 324 u16 word = 0;
325 u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
326 u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
327 327
328 /* 328 /*
329 * Do not use hw->eeprom.ops.read because we do not want to take 329 * Do not use hw->eeprom.ops.read because we do not want to take
@@ -332,10 +332,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
332 */ 332 */
333 333
334 /* Include 0x0-0x3F in the checksum */ 334 /* Include 0x0-0x3F in the checksum */
335 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { 335 for (i = 0; i < checksum_last_word; i++) {
336 if (ixgbe_read_eerd_generic(hw, i, &word) != 0) { 336 if (ixgbe_read_eerd_generic(hw, i, &word)) {
337 hw_dbg(hw, "EEPROM read failed\n"); 337 hw_dbg(hw, "EEPROM read failed\n");
338 break; 338 return IXGBE_ERR_EEPROM;
339 } 339 }
340 checksum += word; 340 checksum += word;
341 } 341 }
@@ -344,11 +344,11 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
344 * Include all data from pointers 0x3, 0x6-0xE. This excludes the 344 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
345 * FW, PHY module, and PCIe Expansion/Option ROM pointers. 345 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
346 */ 346 */
347 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { 347 for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
348 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) 348 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
349 continue; 349 continue;
350 350
351 if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) { 351 if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
352 hw_dbg(hw, "EEPROM read failed\n"); 352 hw_dbg(hw, "EEPROM read failed\n");
353 break; 353 break;
354 } 354 }
@@ -358,8 +358,9 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
358 pointer >= hw->eeprom.word_size) 358 pointer >= hw->eeprom.word_size)
359 continue; 359 continue;
360 360
361 if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) { 361 if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
362 hw_dbg(hw, "EEPROM read failed\n"); 362 hw_dbg(hw, "EEPROM read failed\n");
363 return IXGBE_ERR_EEPROM;
363 break; 364 break;
364 } 365 }
365 366
@@ -368,10 +369,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
368 (pointer + length) >= hw->eeprom.word_size) 369 (pointer + length) >= hw->eeprom.word_size)
369 continue; 370 continue;
370 371
371 for (j = pointer+1; j <= pointer+length; j++) { 372 for (j = pointer + 1; j <= pointer + length; j++) {
372 if (ixgbe_read_eerd_generic(hw, j, &word) != 0) { 373 if (ixgbe_read_eerd_generic(hw, j, &word)) {
373 hw_dbg(hw, "EEPROM read failed\n"); 374 hw_dbg(hw, "EEPROM read failed\n");
374 break; 375 return IXGBE_ERR_EEPROM;
375 } 376 }
376 checksum += word; 377 checksum += word;
377 } 378 }
@@ -379,7 +380,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
379 380
380 checksum = (u16)IXGBE_EEPROM_SUM - checksum; 381 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
381 382
382 return checksum; 383 return (s32)checksum;
383} 384}
384 385
385/** 386/**
@@ -410,23 +411,34 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
410 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) 411 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
411 return IXGBE_ERR_SWFW_SYNC; 412 return IXGBE_ERR_SWFW_SYNC;
412 413
413 checksum = hw->eeprom.ops.calc_checksum(hw); 414 status = hw->eeprom.ops.calc_checksum(hw);
415 if (status < 0)
416 goto out;
417
418 checksum = (u16)(status & 0xffff);
414 419
415 /* Do not use hw->eeprom.ops.read because we do not want to take 420 /* Do not use hw->eeprom.ops.read because we do not want to take
416 * the synchronization semaphores twice here. 421 * the synchronization semaphores twice here.
417 */ 422 */
418 status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, 423 status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
419 &read_checksum); 424 &read_checksum);
425 if (status)
426 goto out;
420 427
421 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 428 /* Verify read checksum from EEPROM is the same as
429 * calculated checksum
430 */
431 if (read_checksum != checksum) {
432 hw_dbg(hw, "Invalid EEPROM checksum");
433 status = IXGBE_ERR_EEPROM_CHECKSUM;
434 }
422 435
423 /* If the user cares, return the calculated checksum */ 436 /* If the user cares, return the calculated checksum */
424 if (checksum_val) 437 if (checksum_val)
425 *checksum_val = checksum; 438 *checksum_val = checksum;
426 439
427 /* Verify read and calculated checksums are the same */ 440out:
428 if (read_checksum != checksum) 441 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
429 return IXGBE_ERR_EEPROM_CHECKSUM;
430 442
431 return status; 443 return status;
432} 444}
@@ -457,15 +469,22 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
457 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) 469 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
458 return IXGBE_ERR_SWFW_SYNC; 470 return IXGBE_ERR_SWFW_SYNC;
459 471
460 checksum = hw->eeprom.ops.calc_checksum(hw); 472 status = hw->eeprom.ops.calc_checksum(hw);
473 if (status < 0)
474 goto out;
475
476 checksum = (u16)(status & 0xffff);
461 477
462 /* Do not use hw->eeprom.ops.write because we do not want to 478 /* Do not use hw->eeprom.ops.write because we do not want to
463 * take the synchronization semaphores twice here. 479 * take the synchronization semaphores twice here.
464 */ 480 */
465 status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum); 481 status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
466 if (!status) 482 if (status)
467 status = ixgbe_update_flash_X540(hw); 483 goto out;
484
485 status = ixgbe_update_flash_X540(hw);
468 486
487out:
469 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 488 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
470 return status; 489 return status;
471} 490}
@@ -544,7 +563,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
544 * Acquires the SWFW semaphore thought the SW_FW_SYNC register for 563 * Acquires the SWFW semaphore thought the SW_FW_SYNC register for
545 * the specified function (CSR, PHY0, PHY1, NVM, Flash) 564 * the specified function (CSR, PHY0, PHY1, NVM, Flash)
546 **/ 565 **/
547static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) 566s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
548{ 567{
549 u32 swfw_sync; 568 u32 swfw_sync;
550 u32 swmask = mask; 569 u32 swmask = mask;
@@ -612,7 +631,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
612 * Releases the SWFW semaphore through the SW_FW_SYNC register 631 * Releases the SWFW semaphore through the SW_FW_SYNC register
613 * for the specified function (CSR, PHY0, PHY1, EVM, Flash) 632 * for the specified function (CSR, PHY0, PHY1, EVM, Flash)
614 **/ 633 **/
615static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) 634void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
616{ 635{
617 u32 swfw_sync; 636 u32 swfw_sync;
618 u32 swmask = mask; 637 u32 swmask = mask;
@@ -699,7 +718,7 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
699 * Devices that implement the version 2 interface: 718 * Devices that implement the version 2 interface:
700 * X540 719 * X540
701 **/ 720 **/
702static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) 721s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
703{ 722{
704 u32 macc_reg; 723 u32 macc_reg;
705 u32 ledctl_reg; 724 u32 ledctl_reg;
@@ -735,7 +754,7 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
735 * Devices that implement the version 2 interface: 754 * Devices that implement the version 2 interface:
736 * X540 755 * X540
737 **/ 756 **/
738static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) 757s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
739{ 758{
740 u32 macc_reg; 759 u32 macc_reg;
741 u32 ledctl_reg; 760 u32 ledctl_reg;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
new file mode 100644
index 000000000000..a1468b1f4d8a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -0,0 +1,39 @@
1/*******************************************************************************
2 *
3 * Intel 10 Gigabit PCI Express Linux driver
4 * Copyright(c) 1999 - 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Contact Information:
19 * Linux NICS <linux.nics@intel.com>
20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 *
23 *****************************************************************************/
24
25#include "ixgbe_type.h"
26
27s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
28s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
29 bool autoneg_wait_to_complete);
30s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
31s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
32enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
33s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
34 bool autoneg_wait_to_complete);
35s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
36s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
37s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
38void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
39s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
new file mode 100644
index 000000000000..ffdd1231f419
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -0,0 +1,1432 @@
1/*******************************************************************************
2 *
3 * Intel 10 Gigabit PCI Express Linux driver
4 * Copyright(c) 1999 - 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Contact Information:
19 * Linux NICS <linux.nics@intel.com>
20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 *
23 ******************************************************************************/
24#include "ixgbe_x540.h"
25#include "ixgbe_type.h"
26#include "ixgbe_common.h"
27#include "ixgbe_phy.h"
28
29/** ixgbe_identify_phy_x550em - Get PHY type based on device id
30 * @hw: pointer to hardware structure
31 *
32 * Returns error code
33 */
34static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
35{
36 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
37
38 switch (hw->device_id) {
39 case IXGBE_DEV_ID_X550EM_X_SFP:
40 /* set up for CS4227 usage */
41 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
42 if (hw->bus.lan_id) {
43 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
44 esdp |= IXGBE_ESDP_SDP1_DIR;
45 }
46 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
47 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
48
49 return ixgbe_identify_module_generic(hw);
50 case IXGBE_DEV_ID_X550EM_X_KX4:
51 hw->phy.type = ixgbe_phy_x550em_kx4;
52 break;
53 case IXGBE_DEV_ID_X550EM_X_KR:
54 hw->phy.type = ixgbe_phy_x550em_kr;
55 break;
56 case IXGBE_DEV_ID_X550EM_X_1G_T:
57 case IXGBE_DEV_ID_X550EM_X_10G_T:
58 return ixgbe_identify_phy_generic(hw);
59 default:
60 break;
61 }
62 return 0;
63}
64
65static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
66 u32 device_type, u16 *phy_data)
67{
68 return IXGBE_NOT_IMPLEMENTED;
69}
70
71static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
72 u32 device_type, u16 phy_data)
73{
74 return IXGBE_NOT_IMPLEMENTED;
75}
76
77/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
78 * @hw: pointer to hardware structure
79 *
80 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
81 * ixgbe_hw struct in order to set up EEPROM access.
82 **/
83s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
84{
85 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
86 u32 eec;
87 u16 eeprom_size;
88
89 if (eeprom->type == ixgbe_eeprom_uninitialized) {
90 eeprom->semaphore_delay = 10;
91 eeprom->type = ixgbe_flash;
92
93 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
94 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
95 IXGBE_EEC_SIZE_SHIFT);
96 eeprom->word_size = 1 << (eeprom_size +
97 IXGBE_EEPROM_WORD_SIZE_SHIFT);
98
99 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
100 eeprom->type, eeprom->word_size);
101 }
102
103 return 0;
104}
105
106/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from a specified register of the
107 * IOSF device
108 * @hw: pointer to hardware structure
109 * @reg_addr: 32 bit PHY register to read
110 * @device_type: 3 bit device type
111 * @data: Pointer to read data from the register
112 **/
113s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
114 u32 device_type, u32 *data)
115{
116 u32 i, command, error;
117
118 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
119 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
120
121 /* Write IOSF control register */
122 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
123
124 /* Check every 10 usec to see if the address cycle completed.
125 * The SB IOSF BUSY bit will clear when the operation is
126 * complete
127 */
128 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
129 usleep_range(10, 20);
130
131 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
132 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
133 break;
134 }
135
136 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
137 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
138 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
139 hw_dbg(hw, "Failed to read, error %x\n", error);
140 return IXGBE_ERR_PHY;
141 }
142
143 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
144 hw_dbg(hw, "Read timed out\n");
145 return IXGBE_ERR_PHY;
146 }
147
148 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
149
150 return 0;
151}
152
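Editor's note: as an illustration only (not part of this patch), a caller inside the driver could read one of the per-port KR PHY registers defined in ixgbe_type.h through this helper roughly as follows, assuming hw->bus.lan_id identifies the port as it does elsewhere in this file:

/* Hedged sketch; assumes the usual ixgbe driver headers. */
static s32 example_read_kr_link_ctrl(struct ixgbe_hw *hw, u32 *reg_val)
{
	return ixgbe_read_iosf_sb_reg_x550(hw,
					   IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
					   IXGBE_SB_IOSF_TARGET_KR_PHY,
					   reg_val);
}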
153/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
154 * command assuming that the semaphore is already obtained.
155 * @hw: pointer to hardware structure
156 * @offset: offset of word in the EEPROM to read
157 * @data: word read from the EEPROM
158 *
159 * Reads a 16 bit word from the EEPROM using the hostif.
160 **/
161s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
162{
163 s32 status;
164 struct ixgbe_hic_read_shadow_ram buffer;
165
166 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
167 buffer.hdr.req.buf_lenh = 0;
168 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
169 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
170
171 /* convert offset from words to bytes */
172 buffer.address = cpu_to_be32(offset * 2);
173 /* one word */
174 buffer.length = cpu_to_be16(sizeof(u16));
175
176 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
177 sizeof(buffer),
178 IXGBE_HI_COMMAND_TIMEOUT, false);
179 if (status)
180 return status;
181
182 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
183 FW_NVM_DATA_OFFSET);
184
185 return 0;
186}
187
188/** ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
189 * @hw: pointer to hardware structure
190 * @offset: offset of word in the EEPROM to read
191 * @words: number of words
192 * @data: word(s) read from the EEPROM
193 *
194 * Reads one or more 16 bit words from the EEPROM using the hostif.
195 **/
196s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
197 u16 offset, u16 words, u16 *data)
198{
199 struct ixgbe_hic_read_shadow_ram buffer;
200 u32 current_word = 0;
201 u16 words_to_read;
202 s32 status;
203 u32 i;
204
205 /* Take semaphore for the entire operation. */
206 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
207 if (status) {
208 hw_dbg(hw, "EEPROM read buffer - semaphore failed\n");
209 return status;
210 }
211
212 while (words) {
213 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
214 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
215 else
216 words_to_read = words;
217
218 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
219 buffer.hdr.req.buf_lenh = 0;
220 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
221 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
222
223 /* convert offset from words to bytes */
224 buffer.address = cpu_to_be32((offset + current_word) * 2);
225 buffer.length = cpu_to_be16(words_to_read * 2);
226
227 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
228 sizeof(buffer),
229 IXGBE_HI_COMMAND_TIMEOUT,
230 false);
231 if (status) {
232 hw_dbg(hw, "Host interface command failed\n");
233 goto out;
234 }
235
236 for (i = 0; i < words_to_read; i++) {
237 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
238 2 * i;
239 u32 value = IXGBE_READ_REG(hw, reg);
240
241 data[current_word] = (u16)(value & 0xffff);
242 current_word++;
243 i++;
244 if (i < words_to_read) {
245 value >>= 16;
246 data[current_word] = (u16)(value & 0xffff);
247 current_word++;
248 }
249 }
250 words -= words_to_read;
251 }
252
253out:
254 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
255 return status;
256}
257
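Editor's note on the unpacking loop above: each 32-bit read of the FLEX_MNG data area yields two 16-bit EEPROM words, the first in the low half and the next in the high half. A standalone sketch of that split (plain C, example value only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t value = 0xBEEF1234;	/* example register contents */
	uint16_t first_word = (uint16_t)(value & 0xffff);
	uint16_t second_word = (uint16_t)(value >> 16);

	assert(first_word == 0x1234);
	assert(second_word == 0xBEEF);
	return 0;
}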
258/** ixgbe_checksum_ptr_x550 - Checksum one pointer region
259 * @hw: pointer to hardware structure
260 * @ptr: pointer offset in eeprom
261 * @size: size of the section pointed to by ptr; if 0, the first word is used as the size
262 * @csum: address of checksum to update
263 *
264 * Returns error status for any failure
265 **/
266static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
267 u16 size, u16 *csum, u16 *buffer,
268 u32 buffer_size)
269{
270 u16 buf[256];
271 s32 status;
272 u16 length, bufsz, i, start;
273 u16 *local_buffer;
274
275 bufsz = sizeof(buf) / sizeof(buf[0]);
276
277 /* Read a chunk at the pointer location */
278 if (!buffer) {
279 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
280 if (status) {
281 hw_dbg(hw, "Failed to read EEPROM image\n");
282 return status;
283 }
284 local_buffer = buf;
285 } else {
286 if (buffer_size < ptr)
287 return IXGBE_ERR_PARAM;
288 local_buffer = &buffer[ptr];
289 }
290
291 if (size) {
292 start = 0;
293 length = size;
294 } else {
295 start = 1;
296 length = local_buffer[0];
297
298 /* Skip pointer section if length is invalid. */
299 if (length == 0xFFFF || length == 0 ||
300 (ptr + length) >= hw->eeprom.word_size)
301 return 0;
302 }
303
304 if (buffer && ((u32)start + (u32)length > buffer_size))
305 return IXGBE_ERR_PARAM;
306
307 for (i = start; length; i++, length--) {
308 if (i == bufsz && !buffer) {
309 ptr += bufsz;
310 i = 0;
311 if (length < bufsz)
312 bufsz = length;
313
314 /* Read a chunk at the pointer location */
315 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
316 bufsz, buf);
317 if (status) {
318 hw_dbg(hw, "Failed to read EEPROM image\n");
319 return status;
320 }
321 }
322 *csum += local_buffer[i];
323 }
324 return 0;
325}
326
327/** ixgbe_calc_checksum_X550 - Calculates and returns the checksum
328 * @hw: pointer to hardware structure
329 * @buffer: pointer to a buffer holding the EEPROM image, or NULL to read it from hardware
330 * @buffer_size: size of buffer
331 *
332 * Returns a negative error code on error, or the 16-bit checksum
333 **/
334s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
335{
336 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
337 u16 *local_buffer;
338 s32 status;
339 u16 checksum = 0;
340 u16 pointer, i, size;
341
342 hw->eeprom.ops.init_params(hw);
343
344 if (!buffer) {
345 /* Read pointer area */
346 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
347 IXGBE_EEPROM_LAST_WORD + 1,
348 eeprom_ptrs);
349 if (status) {
350 hw_dbg(hw, "Failed to read EEPROM image\n");
351 return status;
352 }
353 local_buffer = eeprom_ptrs;
354 } else {
355 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
356 return IXGBE_ERR_PARAM;
357 local_buffer = buffer;
358 }
359
360 /* For X550 hardware include 0x0-0x41 in the checksum, skip the
361 * checksum word itself
362 */
363 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
364 if (i != IXGBE_EEPROM_CHECKSUM)
365 checksum += local_buffer[i];
366
367 /* Include all data from pointers 0x3, 0x6-0xE. This excludes the
368 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
369 */
370 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
371 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
372 continue;
373
374 pointer = local_buffer[i];
375
376 /* Skip pointer section if the pointer is invalid. */
377 if (pointer == 0xFFFF || pointer == 0 ||
378 pointer >= hw->eeprom.word_size)
379 continue;
380
381 switch (i) {
382 case IXGBE_PCIE_GENERAL_PTR:
383 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
384 break;
385 case IXGBE_PCIE_CONFIG0_PTR:
386 case IXGBE_PCIE_CONFIG1_PTR:
387 size = IXGBE_PCIE_CONFIG_SIZE;
388 break;
389 default:
390 size = 0;
391 break;
392 }
393
394 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
395 buffer, buffer_size);
396 if (status)
397 return status;
398 }
399
400 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
401
402 return (s32)checksum;
403}
404
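Editor's note: the value returned above is IXGBE_EEPROM_SUM (0xBABA) minus the 16-bit sum of the covered words, so the covered words plus the stored checksum always total 0xBABA modulo 2^16. A standalone sketch with arbitrary example words (not driver code):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
	const uint16_t eeprom_sum = 0xBABA;	/* IXGBE_EEPROM_SUM */
	uint16_t words[] = { 0x1234, 0xABCD, 0x0042 };	/* example data */
	uint16_t sum = 0;
	size_t i;

	for (i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		sum += words[i];

	uint16_t checksum = (uint16_t)(eeprom_sum - sum);

	assert((uint16_t)(sum + checksum) == eeprom_sum);
	return 0;
}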
405/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
406 * @hw: pointer to hardware structure
407 *
408 * Returns a negative error code on error, or the 16-bit checksum
409 **/
410s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
411{
412 return ixgbe_calc_checksum_X550(hw, NULL, 0);
413}
414
415/** ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
416 * @hw: pointer to hardware structure
417 * @offset: offset of word in the EEPROM to read
418 * @data: word read from the EEPROM
419 *
420 * Reads a 16 bit word from the EEPROM using the hostif.
421 **/
422s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
423{
424 s32 status = 0;
425
426 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
427 status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
428 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
429 } else {
430 status = IXGBE_ERR_SWFW_SYNC;
431 }
432
433 return status;
434}
435
436/** ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
437 * @hw: pointer to hardware structure
438 * @checksum_val: calculated checksum
439 *
440 * Performs checksum calculation and validates the EEPROM checksum. If the
441 * caller does not need checksum_val, the value can be NULL.
442 **/
443s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
444{
445 s32 status;
446 u16 checksum;
447 u16 read_checksum = 0;
448
449 /* Read the first word from the EEPROM. If this times out or fails, do
450 * not continue or we could be in for a very long wait while every
451 * EEPROM read fails
452 */
453 status = hw->eeprom.ops.read(hw, 0, &checksum);
454 if (status) {
455 hw_dbg(hw, "EEPROM read failed\n");
456 return status;
457 }
458
459 status = hw->eeprom.ops.calc_checksum(hw);
460 if (status < 0)
461 return status;
462
463 checksum = (u16)(status & 0xffff);
464
465 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
466 &read_checksum);
467 if (status)
468 return status;
469
470 /* Verify read checksum from EEPROM is the same as
471 * calculated checksum
472 */
473 if (read_checksum != checksum) {
474 status = IXGBE_ERR_EEPROM_CHECKSUM;
475		hw_dbg(hw, "Invalid EEPROM checksum\n");
476 }
477
478 /* If the user cares, return the calculated checksum */
479 if (checksum_val)
480 *checksum_val = checksum;
481
482 return status;
483}
484
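Through the eeprom_ops tables defined later in this file, hw->eeprom.ops.validate_checksum resolves to the routine above. The sketch below shows one way a caller might consume it; it assumes the usual ixgbe driver context, and the function name and error handling are illustrative rather than the driver's actual probe-path code.

/* Illustrative only: assumes ixgbe driver context (struct ixgbe_hw,
 * hw_dbg, IXGBE_ERR_EEPROM_CHECKSUM as used above).
 */
static int example_check_nvm(struct ixgbe_hw *hw)
{
	u16 checksum = 0;
	s32 status;

	status = hw->eeprom.ops.validate_checksum(hw, &checksum);
	if (status == IXGBE_ERR_EEPROM_CHECKSUM) {
		hw_dbg(hw, "EEPROM checksum mismatch, calculated 0x%04x\n",
		       checksum);
		return -EIO;
	}

	return status ? -EIO : 0;
}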
485/** ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
486 * @hw: pointer to hardware structure
487 * @offset: offset of word in the EEPROM to write
488 * @data: word to write to the EEPROM
489 *
490 * Writes a 16 bit word to the EEPROM using the hostif.
491 **/
492s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
493{
494 s32 status;
495 struct ixgbe_hic_write_shadow_ram buffer;
496
497 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
498 buffer.hdr.req.buf_lenh = 0;
499 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
500 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
501
502 /* one word */
503 buffer.length = cpu_to_be16(sizeof(u16));
504 buffer.data = data;
505 buffer.address = cpu_to_be32(offset * 2);
506
507 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
508 sizeof(buffer),
509 IXGBE_HI_COMMAND_TIMEOUT, false);
510 return status;
511}
512
513/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
514 * @hw: pointer to hardware structure
515 * @offset: offset of word in the EEPROM to write
516 * @data: word to write to the EEPROM
517 *
518 * Writes a 16 bit word to the EEPROM using the hostif.
519 **/
520s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
521{
522 s32 status = 0;
523
524 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
525 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
526 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
527 } else {
528		hw_dbg(hw, "write ee hostif failed to get semaphore\n");
529 status = IXGBE_ERR_SWFW_SYNC;
530 }
531
532 return status;
533}
534
535/** ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
536 * @hw: pointer to hardware structure
537 *
538 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
539 **/
540s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
541{
542 s32 status = 0;
543 union ixgbe_hic_hdr2 buffer;
544
545 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
546 buffer.req.buf_lenh = 0;
547 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
548 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
549
550 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
551 sizeof(buffer),
552 IXGBE_HI_COMMAND_TIMEOUT, false);
553 return status;
554}
555
556/** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
557 * @hw: pointer to hardware structure
558 *
559 * After writing EEPROM to shadow RAM using the EEWR register, software
560 * calculates the checksum, updates the checksum word in the EEPROM, and
561 * instructs the hardware to update the flash.
562 **/
563s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
564{
565 s32 status;
566 u16 checksum = 0;
567
568 /* Read the first word from the EEPROM. If this times out or fails, do
569 * not continue or we could be in for a very long wait while every
570 * EEPROM read fails
571 */
572 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
573 if (status) {
574 hw_dbg(hw, "EEPROM read failed\n");
575 return status;
576 }
577
578 status = ixgbe_calc_eeprom_checksum_X550(hw);
579 if (status < 0)
580 return status;
581
582 checksum = (u16)(status & 0xffff);
583
584 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
585 checksum);
586 if (status)
587 return status;
588
589 status = ixgbe_update_flash_X550(hw);
590
591 return status;
592}
593
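Taken together, the two routines above define the intended sequence for patching a shadow-RAM word: write it over the hostif, then recompute the checksum word and flush the image to flash. A minimal sketch, assuming the ixgbe driver context; the helper name and offset are hypothetical.

static s32 example_patch_eeprom_word(struct ixgbe_hw *hw, u16 offset, u16 value)
{
	s32 status;

	/* Write the word to shadow RAM over the host interface */
	status = ixgbe_write_ee_hostif_X550(hw, offset, value);
	if (status)
		return status;

	/* Recompute the checksum word and ask FW to copy shadow RAM to flash */
	return ixgbe_update_eeprom_checksum_X550(hw);
}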
594/** ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
595 * @hw: pointer to hardware structure
596 * @offset: offset of word in the EEPROM to write
597 * @words: number of words
598 * @data: word(s) to write to the EEPROM
599 *
600 * Writes one or more 16 bit words to the EEPROM using the hostif,
601 * holding the EEPROM semaphore for the entire operation.
602 **/
603s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
604 u16 offset, u16 words, u16 *data)
605{
606 s32 status = 0;
607 u32 i = 0;
608
609 /* Take semaphore for the entire operation. */
610 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
611 if (status) {
612 hw_dbg(hw, "EEPROM write buffer - semaphore failed\n");
613 return status;
614 }
615
616 for (i = 0; i < words; i++) {
617 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
618 data[i]);
619 if (status) {
620 hw_dbg(hw, "Eeprom buffered write failed\n");
621 break;
622 }
623 }
624
625 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
626
627 return status;
628}
629
630/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
631 * @hw: pointer to hardware structure
632 **/
633void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
634{
635 struct ixgbe_mac_info *mac = &hw->mac;
636
637 /* CS4227 does not support autoneg, so disable the laser control
638 * functions for SFP+ fiber
639 */
640 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
641 mac->ops.disable_tx_laser = NULL;
642 mac->ops.enable_tx_laser = NULL;
643 mac->ops.flap_tx_laser = NULL;
644 }
645}
646
647/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
648 * @hw: pointer to hardware structure
649 */
650s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
651{
652 bool setup_linear;
653 u16 reg_slice, edc_mode;
654 s32 ret_val;
655
656 switch (hw->phy.sfp_type) {
657 case ixgbe_sfp_type_unknown:
658 return 0;
659 case ixgbe_sfp_type_not_present:
660 return IXGBE_ERR_SFP_NOT_PRESENT;
661 case ixgbe_sfp_type_da_cu_core0:
662 case ixgbe_sfp_type_da_cu_core1:
663 setup_linear = true;
664 break;
665 case ixgbe_sfp_type_srlr_core0:
666 case ixgbe_sfp_type_srlr_core1:
667 case ixgbe_sfp_type_da_act_lmt_core0:
668 case ixgbe_sfp_type_da_act_lmt_core1:
669 case ixgbe_sfp_type_1g_sx_core0:
670 case ixgbe_sfp_type_1g_sx_core1:
671 setup_linear = false;
672 break;
673 default:
674 return IXGBE_ERR_SFP_NOT_SUPPORTED;
675 }
676
677 ixgbe_init_mac_link_ops_X550em(hw);
678 hw->phy.ops.reset = NULL;
679
680 /* The CS4227 slice address is the base address + the port-pair reg
681 * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0.
682 */
683 reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
684
685 if (setup_linear)
686 edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
687 else
688 edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
689
690 /* Configure CS4227 for connection type. */
691 ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
692 edc_mode);
693
694 if (ret_val)
695 ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice,
696 edc_mode);
697
698 return ret_val;
699}
700
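The slice arithmetic in ixgbe_setup_sfp_modules_X550em() is easy to verify in isolation. The stand-alone sketch below reproduces it using the 0x12B0 base quoted in the comment above; the EDC mode value is a placeholder, not one of the real IXGBE_CS4227_EDC_MODE_* codes.

#include <stdint.h>
#include <stdio.h>

#define CS4227_SPARE24_LSB 0x12B0	/* base quoted in the driver comment */

int main(void)
{
	unsigned int lan_id;
	/* EDC word format used above: (mode << 1) | 0x1; mode is a placeholder */
	uint16_t edc_word = (uint16_t)((0x2 << 1) | 0x1);

	for (lan_id = 0; lan_id < 2; lan_id++) {
		uint16_t reg_slice = CS4227_SPARE24_LSB + (lan_id << 12);

		/* Prints 0x12B0 for port 0 and 0x22B0 for port 1 */
		printf("port %u: slice reg 0x%04x, edc word 0x%04x\n",
		       lan_id, reg_slice, edc_word);
	}
	return 0;
}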
701/** ixgbe_get_link_capabilities_X550em - Determines link capabilities
702 * @hw: pointer to hardware structure
703 * @speed: pointer to link speed
704 * @autoneg: true when autoneg or autotry is enabled
705 **/
706s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
707 ixgbe_link_speed *speed,
708 bool *autoneg)
709{
710 /* SFP */
711 if (hw->phy.media_type == ixgbe_media_type_fiber) {
712 /* CS4227 SFP must not enable auto-negotiation */
713 *autoneg = false;
714
715 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
716 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
717 *speed = IXGBE_LINK_SPEED_1GB_FULL;
718 return 0;
719 }
720
721 /* Link capabilities are based on SFP */
722 if (hw->phy.multispeed_fiber)
723 *speed = IXGBE_LINK_SPEED_10GB_FULL |
724 IXGBE_LINK_SPEED_1GB_FULL;
725 else
726 *speed = IXGBE_LINK_SPEED_10GB_FULL;
727 } else {
728 *speed = IXGBE_LINK_SPEED_10GB_FULL |
729 IXGBE_LINK_SPEED_1GB_FULL;
730 *autoneg = true;
731 }
732 return 0;
733}
734
735/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to the specified register of the
736 * IOSF device
737 *
738 * @hw: pointer to hardware structure
739 * @reg_addr: 32 bit PHY register to write
740 * @device_type: 3 bit device type
741 * @data: Data to write to the register
742 **/
743s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
744 u32 device_type, u32 data)
745{
746 u32 i, command, error;
747
748 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
749 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
750
751 /* Write IOSF control register */
752 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
753
754 /* Write IOSF data register */
755 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
756
757 /* Check every 10 usec to see if the address cycle completed.
758 * The SB IOSF BUSY bit will clear when the operation is
759 * complete
760 */
761 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
762 usleep_range(10, 20);
763
764 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
765 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
766 break;
767 }
768
769 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
770 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
771 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
772 hw_dbg(hw, "Failed to write, error %x\n", error);
773 return IXGBE_ERR_PHY;
774 }
775
776 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
777 hw_dbg(hw, "Write timed out\n");
778 return IXGBE_ERR_PHY;
779 }
780
781 return 0;
782}
783
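Every KR PHY adjustment later in this file (ixgbe_setup_ixfi_x550em(), ixgbe_setup_kr_x550em(), and so on) is a read-modify-write through ixgbe_read_iosf_sb_reg_x550() and the routine above. A sketch of that pattern, assuming the ixgbe driver context; the wrapper name is hypothetical.

static s32 example_iosf_set_bits(struct ixgbe_hw *hw, u32 reg_addr,
				 u32 device_type, u32 set_mask)
{
	u32 reg_val;
	s32 status;

	/* Indirect read through the IOSF side-band window */
	status = ixgbe_read_iosf_sb_reg_x550(hw, reg_addr, device_type,
					     &reg_val);
	if (status)
		return status;

	reg_val |= set_mask;

	/* Write the modified value back through the same window */
	return ixgbe_write_iosf_sb_reg_x550(hw, reg_addr, device_type, reg_val);
}

Restarting KR auto-negotiation, for example, would then be example_iosf_set_bits(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART).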
784/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
785 * @hw: pointer to hardware structure
786 * @speed: the link speed to force
787 *
788 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
789 * internal and external PHY at a specific speed, without autonegotiation.
790 **/
791static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
792{
793 s32 status;
794 u32 reg_val;
795
796 /* Disable AN and force speed to 10G Serial. */
797 status = ixgbe_read_iosf_sb_reg_x550(hw,
798 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
799 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
800 if (status)
801 return status;
802
803 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
804 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
805
806 /* Select forced link speed for internal PHY. */
807 switch (*speed) {
808 case IXGBE_LINK_SPEED_10GB_FULL:
809 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
810 break;
811 case IXGBE_LINK_SPEED_1GB_FULL:
812 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
813 break;
814 default:
815 /* Other link speeds are not supported by internal KR PHY. */
816 return IXGBE_ERR_LINK_SETUP;
817 }
818
819 status = ixgbe_write_iosf_sb_reg_x550(hw,
820				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
821 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
822 if (status)
823 return status;
824
825 /* Disable training protocol FSM. */
826 status = ixgbe_read_iosf_sb_reg_x550(hw,
827 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
828 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
829 if (status)
830 return status;
831
832 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
833 status = ixgbe_write_iosf_sb_reg_x550(hw,
834 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
835 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
836 if (status)
837 return status;
838
839 /* Disable Flex from training TXFFE. */
840 status = ixgbe_read_iosf_sb_reg_x550(hw,
841 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
842 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
843 if (status)
844 return status;
845
846 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
847 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
848 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
849 status = ixgbe_write_iosf_sb_reg_x550(hw,
850 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
851 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
852 if (status)
853 return status;
854
855 status = ixgbe_read_iosf_sb_reg_x550(hw,
856 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
857 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
858 if (status)
859 return status;
860
861 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
862 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
863 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
864 status = ixgbe_write_iosf_sb_reg_x550(hw,
865 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
866 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
867 if (status)
868 return status;
869
870 /* Enable override for coefficients. */
871 status = ixgbe_read_iosf_sb_reg_x550(hw,
872 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
873 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
874 if (status)
875 return status;
876
877 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
878 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
879 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
880 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
881 status = ixgbe_write_iosf_sb_reg_x550(hw,
882 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
883 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
884 if (status)
885 return status;
886
887 /* Toggle port SW reset by AN reset. */
888 status = ixgbe_read_iosf_sb_reg_x550(hw,
889 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
890 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
891 if (status)
892 return status;
893
894 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
895 status = ixgbe_write_iosf_sb_reg_x550(hw,
896 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
897 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
898
899 return status;
900}
901
902/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
903 * @hw: pointer to hardware structure
904 *
905 * Configures the integrated KX4 PHY.
906 **/
907s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
908{
909 s32 status;
910 u32 reg_val;
911
912 status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
913 IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
914 hw->bus.lan_id, &reg_val);
915 if (status)
916 return status;
917
918 reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
919 IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
920
921 reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
922
923 /* Advertise 10G support. */
924 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
925 reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
926
927 /* Advertise 1G support. */
928 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
929 reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
930
931 /* Restart auto-negotiation. */
932 reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
933 status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
934 IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
935 hw->bus.lan_id, reg_val);
936
937 return status;
938}
939
940/** ixgbe_setup_kr_x550em - Configure the KR PHY.
941 * @hw: pointer to hardware structure
942 *
943 * Configures the integrated KR PHY.
944 **/
945s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
946{
947 s32 status;
948 u32 reg_val;
949
950 status = ixgbe_read_iosf_sb_reg_x550(hw,
951 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
952 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
953 if (status)
954 return status;
955
956 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
957 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
958 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
959 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
960 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
961
962 /* Advertise 10G support. */
963 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
964 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
965
966 /* Advertise 1G support. */
967 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
968 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
969
970 /* Restart auto-negotiation. */
971 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
972 status = ixgbe_write_iosf_sb_reg_x550(hw,
973 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
974 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
975
976 return status;
977}
978
979/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY
980 * @hw: pointer to hardware structure
981 *
982 * Configures the integrated KR PHY to talk to the external PHY. The base
983 * driver will call this function when it gets notification via interrupt from
984 * the external PHY. This function forces the internal PHY into iXFI mode at
985 * the correct speed.
986 *
987 * A return of a non-zero value indicates an error, and the base driver should
988 * not report link up.
989 **/
990s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
991{
992 u32 status;
993 u16 lasi, autoneg_status, speed;
994 ixgbe_link_speed force_speed;
995
996 /* Verify that the external link status has changed */
997 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS,
998 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi);
999 if (status)
1000 return status;
1001
1002 /* If there was no change in link status, we can just exit */
1003 if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM))
1004 return 0;
1005
1006	/* Read the latched status twice back to back; the second read reflects the current link state */
1007 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
1008 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
1009 &autoneg_status);
1010 if (status)
1011 return status;
1012
1013 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
1014 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
1015 &autoneg_status);
1016 if (status)
1017 return status;
1018
1019	/* If link is not up, return an error so the caller treats the link as down */
1020 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
1021 return IXGBE_ERR_INVALID_LINK_SETTINGS;
1022
1023 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
1024 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
1025 &speed);
1026
1027 /* clear everything but the speed and duplex bits */
1028 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
1029
1030 switch (speed) {
1031 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
1032 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
1033 break;
1034 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
1035 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
1036 break;
1037 default:
1038 /* Internal PHY does not support anything else */
1039 return IXGBE_ERR_INVALID_LINK_SETTINGS;
1040 }
1041
1042 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
1043}
1044
1045/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
1046 * @hw: pointer to hardware structure
1047 *
1048 * Initialize any function pointers that were not able to be
1049 * set during init_shared_code because the PHY/SFP type was
1050 * not known. Perform the SFP init if necessary.
1051 **/
1052s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
1053{
1054 struct ixgbe_phy_info *phy = &hw->phy;
1055 s32 ret_val;
1056 u32 esdp;
1057
1058 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
1059 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
1060 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
1061
1062 if (hw->bus.lan_id) {
1063 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
1064 esdp |= IXGBE_ESDP_SDP1_DIR;
1065 }
1066 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
1067 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
1068 }
1069
1070 /* Identify the PHY or SFP module */
1071 ret_val = phy->ops.identify(hw);
1072
1073 /* Setup function pointers based on detected SFP module and speeds */
1074 ixgbe_init_mac_link_ops_X550em(hw);
1075 if (phy->sfp_type != ixgbe_sfp_type_unknown)
1076 phy->ops.reset = NULL;
1077
1078 /* Set functions pointers based on phy type */
1079 switch (hw->phy.type) {
1080 case ixgbe_phy_x550em_kx4:
1081 phy->ops.setup_link = ixgbe_setup_kx4_x550em;
1082 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
1083 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
1084 break;
1085 case ixgbe_phy_x550em_kr:
1086 phy->ops.setup_link = ixgbe_setup_kr_x550em;
1087 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
1088 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
1089 break;
1090 case ixgbe_phy_x550em_ext_t:
1091 phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em;
1092 break;
1093 default:
1094 break;
1095 }
1096 return ret_val;
1097}
1098
1099/** ixgbe_get_media_type_X550em - Get media type
1100 * @hw: pointer to hardware structure
1101 *
1102 * Returns the media type (fiber, copper, backplane)
1103 *
1104 */
1105enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1106{
1107 enum ixgbe_media_type media_type;
1108
1109 /* Detect if there is a copper PHY attached. */
1110 switch (hw->device_id) {
1111 case IXGBE_DEV_ID_X550EM_X_KR:
1112 case IXGBE_DEV_ID_X550EM_X_KX4:
1113 media_type = ixgbe_media_type_backplane;
1114 break;
1115 case IXGBE_DEV_ID_X550EM_X_SFP:
1116 media_type = ixgbe_media_type_fiber;
1117 break;
1118 case IXGBE_DEV_ID_X550EM_X_1G_T:
1119 case IXGBE_DEV_ID_X550EM_X_10G_T:
1120 media_type = ixgbe_media_type_copper;
1121 break;
1122 default:
1123 media_type = ixgbe_media_type_unknown;
1124 break;
1125 }
1126 return media_type;
1127}
1128
1129/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
1130 * @hw: pointer to hardware structure
1131 **/
1132s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
1133{
1134 u32 status;
1135 u16 reg;
1136 u32 retries = 2;
1137
1138 do {
1139 /* decrement retries counter and exit if we hit 0 */
1140 if (retries < 1) {
1141 hw_dbg(hw, "External PHY not yet finished resetting.");
1142 return IXGBE_ERR_PHY;
1143 }
1144 retries--;
1145
1146 status = hw->phy.ops.read_reg(hw,
1147 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
1148 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1149 &reg);
1150 if (status)
1151 return status;
1152
1153 /* Verify PHY FW reset has completed */
1154 } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1);
1155
1156 /* Set port to low power mode */
1157 status = hw->phy.ops.read_reg(hw,
1158 IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
1159 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1160 &reg);
1161 if (status)
1162 return status;
1163
1164 /* Enable the transmitter */
1165 status = hw->phy.ops.read_reg(hw,
1166 IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
1167 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1168 &reg);
1169 if (status)
1170 return status;
1171
1172 reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE;
1173
1174 status = hw->phy.ops.write_reg(hw,
1175 IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
1176 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1177 reg);
1178 if (status)
1179 return status;
1180
1181 /* Un-stall the PHY FW */
1182 status = hw->phy.ops.read_reg(hw,
1183 IXGBE_MDIO_GLOBAL_RES_PR_10,
1184 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1185 &reg);
1186 if (status)
1187 return status;
1188
1189 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
1190
1191 status = hw->phy.ops.write_reg(hw,
1192 IXGBE_MDIO_GLOBAL_RES_PR_10,
1193 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1194 reg);
1195 return status;
1196}
1197
1198/** ixgbe_reset_hw_X550em - Perform hardware reset
1199 * @hw: pointer to hardware structure
1200 *
1201 * Resets the hardware by resetting the transmit and receive units, masks
1202 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
1203 * reset.
1204 **/
1205s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
1206{
1207 ixgbe_link_speed link_speed;
1208 s32 status;
1209 u32 ctrl = 0;
1210 u32 i;
1211 bool link_up = false;
1212
1213 /* Call adapter stop to disable Tx/Rx and clear interrupts */
1214 status = hw->mac.ops.stop_adapter(hw);
1215 if (status)
1216 return status;
1217
1218 /* flush pending Tx transactions */
1219 ixgbe_clear_tx_pending(hw);
1220
1221 /* PHY ops must be identified and initialized prior to reset */
1222
1223 /* Identify PHY and related function pointers */
1224 status = hw->phy.ops.init(hw);
1225
1226 /* start the external PHY */
1227 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
1228 status = ixgbe_init_ext_t_x550em(hw);
1229 if (status)
1230 return status;
1231 }
1232
1233 /* Setup SFP module if there is one present. */
1234 if (hw->phy.sfp_setup_needed) {
1235 status = hw->mac.ops.setup_sfp(hw);
1236 hw->phy.sfp_setup_needed = false;
1237 }
1238
1239 /* Reset PHY */
1240 if (!hw->phy.reset_disable && hw->phy.ops.reset)
1241 hw->phy.ops.reset(hw);
1242
1243mac_reset_top:
1244 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
1245 * If link reset is used when link is up, it might reset the PHY when
1246 * mng is using it. If link is down or the flag to force full link
1247 * reset is set, then perform link reset.
1248 */
1249 ctrl = IXGBE_CTRL_LNK_RST;
1250
1251 if (!hw->force_full_reset) {
1252 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
1253 if (link_up)
1254 ctrl = IXGBE_CTRL_RST;
1255 }
1256
1257 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1258 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1259 IXGBE_WRITE_FLUSH(hw);
1260
1261 /* Poll for reset bit to self-clear meaning reset is complete */
1262 for (i = 0; i < 10; i++) {
1263 udelay(1);
1264 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1265 if (!(ctrl & IXGBE_CTRL_RST_MASK))
1266 break;
1267 }
1268
1269 if (ctrl & IXGBE_CTRL_RST_MASK) {
1270 status = IXGBE_ERR_RESET_FAILED;
1271 hw_dbg(hw, "Reset polling failed to complete.\n");
1272 }
1273
1274 msleep(50);
1275
1276	/* Double resets are required for recovery from certain error
1277	 * conditions. Between resets, it is necessary to stall to allow
1278	 * time for any pending HW events to complete.
1279 */
1280 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1281 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1282 goto mac_reset_top;
1283 }
1284
1285 /* Store the permanent mac address */
1286 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1287
1288 /* Store MAC address from RAR0, clear receive address registers, and
1289 * clear the multicast table. Also reset num_rar_entries to 128,
1290 * since we modify this value when programming the SAN MAC address.
1291 */
1292 hw->mac.num_rar_entries = 128;
1293 hw->mac.ops.init_rx_addrs(hw);
1294
1295 return status;
1296}
1297
1298#define X550_COMMON_MAC \
1299 .init_hw = &ixgbe_init_hw_generic, \
1300 .start_hw = &ixgbe_start_hw_X540, \
1301 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, \
1302 .enable_rx_dma = &ixgbe_enable_rx_dma_generic, \
1303 .get_mac_addr = &ixgbe_get_mac_addr_generic, \
1304 .get_device_caps = &ixgbe_get_device_caps_generic, \
1305 .stop_adapter = &ixgbe_stop_adapter_generic, \
1306 .get_bus_info = &ixgbe_get_bus_info_generic, \
1307 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \
1308 .read_analog_reg8 = NULL, \
1309 .write_analog_reg8 = NULL, \
1310 .set_rxpba = &ixgbe_set_rxpba_generic, \
1311 .check_link = &ixgbe_check_mac_link_generic, \
1312 .led_on = &ixgbe_led_on_generic, \
1313 .led_off = &ixgbe_led_off_generic, \
1314 .blink_led_start = &ixgbe_blink_led_start_X540, \
1315 .blink_led_stop = &ixgbe_blink_led_stop_X540, \
1316 .set_rar = &ixgbe_set_rar_generic, \
1317 .clear_rar = &ixgbe_clear_rar_generic, \
1318 .set_vmdq = &ixgbe_set_vmdq_generic, \
1319 .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, \
1320 .clear_vmdq = &ixgbe_clear_vmdq_generic, \
1321 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, \
1322 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, \
1323 .enable_mc = &ixgbe_enable_mc_generic, \
1324 .disable_mc = &ixgbe_disable_mc_generic, \
1325 .clear_vfta = &ixgbe_clear_vfta_generic, \
1326 .set_vfta = &ixgbe_set_vfta_generic, \
1327 .fc_enable = &ixgbe_fc_enable_generic, \
1328 .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \
1329 .init_uta_tables = &ixgbe_init_uta_tables_generic, \
1330 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
1331 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
1332 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \
1333 .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \
1334 .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
1335 .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
1336 .get_thermal_sensor_data = NULL, \
1337 .init_thermal_sensor_thresh = NULL, \
1338 .prot_autoc_read = &prot_autoc_read_generic, \
1339 .prot_autoc_write = &prot_autoc_write_generic, \
1340
1341static struct ixgbe_mac_operations mac_ops_X550 = {
1342 X550_COMMON_MAC
1343 .reset_hw = &ixgbe_reset_hw_X540,
1344 .get_media_type = &ixgbe_get_media_type_X540,
1345 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
1346 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
1347 .setup_link = &ixgbe_setup_mac_link_X540,
1348 .set_rxpba = &ixgbe_set_rxpba_generic,
1349 .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
1350 .setup_sfp = NULL,
1351};
1352
1353static struct ixgbe_mac_operations mac_ops_X550EM_x = {
1354 X550_COMMON_MAC
1355 .reset_hw = &ixgbe_reset_hw_X550em,
1356 .get_media_type = &ixgbe_get_media_type_X550em,
1357 .get_san_mac_addr = NULL,
1358 .get_wwn_prefix = NULL,
1359 .setup_link = NULL, /* defined later */
1360 .get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
1361 .setup_sfp = ixgbe_setup_sfp_modules_X550em,
1362
1363};
1364
1365#define X550_COMMON_EEP \
1366 .read = &ixgbe_read_ee_hostif_X550, \
1367 .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \
1368 .write = &ixgbe_write_ee_hostif_X550, \
1369 .write_buffer = &ixgbe_write_ee_hostif_buffer_X550, \
1370 .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \
1371 .update_checksum = &ixgbe_update_eeprom_checksum_X550, \
1372 .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
1373
1374static struct ixgbe_eeprom_operations eeprom_ops_X550 = {
1375 X550_COMMON_EEP
1376 .init_params = &ixgbe_init_eeprom_params_X550,
1377};
1378
1379static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
1380 X550_COMMON_EEP
1381 .init_params = &ixgbe_init_eeprom_params_X540,
1382};
1383
1384#define X550_COMMON_PHY \
1385 .identify_sfp = &ixgbe_identify_module_generic, \
1386 .reset = NULL, \
1387 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, \
1388 .read_i2c_byte = &ixgbe_read_i2c_byte_generic, \
1389 .write_i2c_byte = &ixgbe_write_i2c_byte_generic, \
1390 .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
1391 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
1392 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
1393 .check_overtemp = &ixgbe_tn_check_overtemp, \
1394 .get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
1395
1396static struct ixgbe_phy_operations phy_ops_X550 = {
1397 X550_COMMON_PHY
1398 .init = NULL,
1399 .identify = &ixgbe_identify_phy_generic,
1400 .read_reg = &ixgbe_read_phy_reg_generic,
1401 .write_reg = &ixgbe_write_phy_reg_generic,
1402 .setup_link = &ixgbe_setup_phy_link_generic,
1403 .read_i2c_combined = &ixgbe_read_i2c_combined_generic,
1404 .write_i2c_combined = &ixgbe_write_i2c_combined_generic,
1405};
1406
1407static struct ixgbe_phy_operations phy_ops_X550EM_x = {
1408 X550_COMMON_PHY
1409 .init = &ixgbe_init_phy_ops_X550em,
1410 .identify = &ixgbe_identify_phy_x550em,
1411 .read_reg = NULL, /* defined later */
1412 .write_reg = NULL, /* defined later */
1413 .setup_link = NULL, /* defined later */
1414};
1415
1416struct ixgbe_info ixgbe_X550_info = {
1417 .mac = ixgbe_mac_X550,
1418 .get_invariants = &ixgbe_get_invariants_X540,
1419 .mac_ops = &mac_ops_X550,
1420 .eeprom_ops = &eeprom_ops_X550,
1421 .phy_ops = &phy_ops_X550,
1422 .mbx_ops = &mbx_ops_generic,
1423};
1424
1425struct ixgbe_info ixgbe_X550EM_x_info = {
1426 .mac = ixgbe_mac_X550EM_x,
1427 .get_invariants = &ixgbe_get_invariants_X540,
1428 .mac_ops = &mac_ops_X550EM_x,
1429 .eeprom_ops = &eeprom_ops_X550EM_x,
1430 .phy_ops = &phy_ops_X550EM_x,
1431 .mbx_ops = &mbx_ops_generic,
1432};
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 05e4f32d84f7..7412d378b77b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -31,6 +31,8 @@
31/* Device IDs */ 31/* Device IDs */
32#define IXGBE_DEV_ID_82599_VF 0x10ED 32#define IXGBE_DEV_ID_82599_VF 0x10ED
33#define IXGBE_DEV_ID_X540_VF 0x1515 33#define IXGBE_DEV_ID_X540_VF 0x1515
34#define IXGBE_DEV_ID_X550_VF 0x1565
35#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
34 36
35#define IXGBE_VF_IRQ_CLEAR_MASK 7 37#define IXGBE_VF_IRQ_CLEAR_MASK 7
36#define IXGBE_VF_MAX_TX_QUEUES 8 38#define IXGBE_VF_MAX_TX_QUEUES 8
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index bb6726cbeb86..8c44ab25f3fa 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -432,10 +432,14 @@ enum ixbgevf_state_t {
432enum ixgbevf_boards { 432enum ixgbevf_boards {
433 board_82599_vf, 433 board_82599_vf,
434 board_X540_vf, 434 board_X540_vf,
435 board_X550_vf,
436 board_X550EM_x_vf,
435}; 437};
436 438
437extern const struct ixgbevf_info ixgbevf_82599_vf_info; 439extern const struct ixgbevf_info ixgbevf_82599_vf_info;
438extern const struct ixgbevf_info ixgbevf_X540_vf_info; 440extern const struct ixgbevf_info ixgbevf_X540_vf_info;
441extern const struct ixgbevf_info ixgbevf_X550_vf_info;
442extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
439extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; 443extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
440 444
441/* needed by ethtool.c */ 445/* needed by ethtool.c */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 755f71f07ae1..3b0ddf757fb6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -66,6 +66,8 @@ static char ixgbevf_copyright[] =
66static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 66static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
67 [board_82599_vf] = &ixgbevf_82599_vf_info, 67 [board_82599_vf] = &ixgbevf_82599_vf_info,
68 [board_X540_vf] = &ixgbevf_X540_vf_info, 68 [board_X540_vf] = &ixgbevf_X540_vf_info,
69 [board_X550_vf] = &ixgbevf_X550_vf_info,
70 [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
69}; 71};
70 72
71/* ixgbevf_pci_tbl - PCI Device ID Table 73/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -79,6 +81,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
79static const struct pci_device_id ixgbevf_pci_tbl[] = { 81static const struct pci_device_id ixgbevf_pci_tbl[] = {
80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, 82 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, 83 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
84 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
85 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
82 /* required last entry */ 86 /* required last entry */
83 {0, } 87 {0, }
84}; 88};
@@ -3529,7 +3533,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3529 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3533 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3530 break; 3534 break;
3531 default: 3535 default:
3532 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 3536 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
3533 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3537 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3534 break; 3538 break;
3535 } 3539 }
@@ -3733,6 +3737,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3733 struct ixgbe_hw *hw = NULL; 3737 struct ixgbe_hw *hw = NULL;
3734 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3738 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3735 int err, pci_using_dac; 3739 int err, pci_using_dac;
3740 bool disable_dev = false;
3736 3741
3737 err = pci_enable_device(pdev); 3742 err = pci_enable_device(pdev);
3738 if (err) 3743 if (err)
@@ -3767,7 +3772,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3767 3772
3768 SET_NETDEV_DEV(netdev, &pdev->dev); 3773 SET_NETDEV_DEV(netdev, &pdev->dev);
3769 3774
3770 pci_set_drvdata(pdev, netdev);
3771 adapter = netdev_priv(netdev); 3775 adapter = netdev_priv(netdev);
3772 3776
3773 adapter->netdev = netdev; 3777 adapter->netdev = netdev;
@@ -3856,16 +3860,28 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3856 if (err) 3860 if (err)
3857 goto err_register; 3861 goto err_register;
3858 3862
3863 pci_set_drvdata(pdev, netdev);
3859 netif_carrier_off(netdev); 3864 netif_carrier_off(netdev);
3860 3865
3861 ixgbevf_init_last_counter_stats(adapter); 3866 ixgbevf_init_last_counter_stats(adapter);
3862 3867
3863 /* print the MAC address */ 3868 /* print the VF info */
3864 hw_dbg(hw, "%pM\n", netdev->dev_addr); 3869 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
3870 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
3865 3871
3866 hw_dbg(hw, "MAC: %d\n", hw->mac.type); 3872 switch (hw->mac.type) {
3873 case ixgbe_mac_X550_vf:
3874 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
3875 break;
3876 case ixgbe_mac_X540_vf:
3877 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
3878 break;
3879 case ixgbe_mac_82599_vf:
3880 default:
3881 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
3882 break;
3883 }
3867 3884
3868 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3869 return 0; 3885 return 0;
3870 3886
3871err_register: 3887err_register:
@@ -3874,12 +3890,13 @@ err_sw_init:
3874 ixgbevf_reset_interrupt_capability(adapter); 3890 ixgbevf_reset_interrupt_capability(adapter);
3875 iounmap(adapter->io_addr); 3891 iounmap(adapter->io_addr);
3876err_ioremap: 3892err_ioremap:
3893 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
3877 free_netdev(netdev); 3894 free_netdev(netdev);
3878err_alloc_etherdev: 3895err_alloc_etherdev:
3879 pci_release_regions(pdev); 3896 pci_release_regions(pdev);
3880err_pci_reg: 3897err_pci_reg:
3881err_dma: 3898err_dma:
3882 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) 3899 if (!adapter || disable_dev)
3883 pci_disable_device(pdev); 3900 pci_disable_device(pdev);
3884 return err; 3901 return err;
3885} 3902}
@@ -3896,7 +3913,13 @@ err_dma:
3896static void ixgbevf_remove(struct pci_dev *pdev) 3913static void ixgbevf_remove(struct pci_dev *pdev)
3897{ 3914{
3898 struct net_device *netdev = pci_get_drvdata(pdev); 3915 struct net_device *netdev = pci_get_drvdata(pdev);
3899 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3916 struct ixgbevf_adapter *adapter;
3917 bool disable_dev;
3918
3919 if (!netdev)
3920 return;
3921
3922 adapter = netdev_priv(netdev);
3900 3923
3901 set_bit(__IXGBEVF_REMOVING, &adapter->state); 3924 set_bit(__IXGBEVF_REMOVING, &adapter->state);
3902 3925
@@ -3916,9 +3939,10 @@ static void ixgbevf_remove(struct pci_dev *pdev)
3916 3939
3917 hw_dbg(&adapter->hw, "Remove complete\n"); 3940 hw_dbg(&adapter->hw, "Remove complete\n");
3918 3941
3942 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
3919 free_netdev(netdev); 3943 free_netdev(netdev);
3920 3944
3921 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) 3945 if (disable_dev)
3922 pci_disable_device(pdev); 3946 pci_disable_device(pdev);
3923} 3947}
3924 3948
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 9cddd56d02c3..cdb53be7d995 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -617,3 +617,13 @@ const struct ixgbevf_info ixgbevf_X540_vf_info = {
617 .mac = ixgbe_mac_X540_vf, 617 .mac = ixgbe_mac_X540_vf,
618 .mac_ops = &ixgbevf_mac_ops, 618 .mac_ops = &ixgbevf_mac_ops,
619}; 619};
620
621const struct ixgbevf_info ixgbevf_X550_vf_info = {
622 .mac = ixgbe_mac_X550_vf,
623 .mac_ops = &ixgbevf_mac_ops,
624};
625
626const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
627 .mac = ixgbe_mac_X550EM_x_vf,
628 .mac_ops = &ixgbevf_mac_ops,
629};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index aa8cc8dc25d1..5b172427f459 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -74,6 +74,8 @@ enum ixgbe_mac_type {
74 ixgbe_mac_unknown = 0, 74 ixgbe_mac_unknown = 0,
75 ixgbe_mac_82599_vf, 75 ixgbe_mac_82599_vf,
76 ixgbe_mac_X540_vf, 76 ixgbe_mac_X540_vf,
77 ixgbe_mac_X550_vf,
78 ixgbe_mac_X550EM_x_vf,
77 ixgbe_num_macs 79 ixgbe_num_macs
78}; 80};
79 81