author     David S. Miller <davem@davemloft.net>    2015-02-24 11:48:48 -0500
committer  David S. Miller <davem@davemloft.net>    2015-02-24 11:48:48 -0500
commit     2a12d6cf751ff4a26f4054670d4f19ff315ed3b0 (patch)
tree       bd908b685e132a943a32de30437faaeed9a583ab /drivers/net/ethernet
parent     1e0629d3f802fd30030ea2496d4db8ce324aa52f (diff)
parent     54e16f64f0c494b78e3872612e45d002d220664d (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-02-23

This series contains updates to e1000e, igbvf, i40e and i40evf.

David adds support for i219 devices to the e1000e driver.

Jeff (me) provides two patches to clean up igbvf: the first cleans up the whitespace issues found, and the second replaces msleep(), min() and max() with usleep_range(), min_t() and max_t(), respectively.

Kamil updates the shadow RAM read/write functions, refactoring them to prepare for future work.

Shannon renames the debugfs command "clear_stats pf" to "clear_stats port" to clarify what the function really does.

Mitch refactors the receive routine by splitting the receive hot path code into two paths, one for packet split and one for single buffer, which improves receive performance. He also disables NAPI polling sooner when closing the interface, fixing an occasional panic during close that was caused by the driver trying to delete and clean rings at the same time, and refactors reset for i40evf, since a recent change to the shutdown flow broke the reset flow: i40evf_down() now holds the critical section lock, so it cannot be called from the reset handler, which also holds the lock.

Nicholas restricts the virtual channel opcodes so that they remain consistent between updates to the opcode enum.

Neerav converts the VSI connection type to use a #define instead of a magic number.

Anjali updates the registers file to remove registers no longer available, and fixes the EMPR interrupt handling so that we won't trigger another EMPR when we receive an EMPR event.

Catherine cleans up the variable an_enable, since it was set and never used.

Greg fixes the netdev op that allows the operator to turn MAC/VLAN spoof checking on and off, so that it includes the flag for VLAN spoof checking.

v2: updated patch #10 in the series to use test_and_clear_bit(), as suggested by Sergei Shtylyov
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
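A note on the igbvf usleep_range() cleanup mentioned above: msleep() is jiffy-based and can sleep far longer than requested for small arguments, which is why checkpatch recommends usleep_range() for short delays. A minimal sketch of the substitution pattern (illustrative values, not lifted from the actual patch):

    /* Before: for small arguments, msleep() may sleep much longer than asked. */
    msleep(10);

    /* After: an explicit [min, max] window in microseconds bounds the delay
     * and lets the scheduler coalesce timers.
     */
    usleep_range(10000, 20000);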
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h        |    1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h           |    2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c         |    6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h              |    6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c         |  770
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h         |    9
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c          |   50
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c             |    4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h            |    3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h              |    1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c       |   33
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c      |   14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c      |    5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_fcoe.c         |    2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c         |   50
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c          |   96
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h    |    3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h     |   50
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c         |  419
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h         |   17
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h         |    1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl.h     |   42
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c  |    3
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_register.h   |   50
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c       |  401
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h       |   17
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h       |    1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h   |   42
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c     |   63
-rw-r--r--  drivers/net/ethernet/intel/igbvf/defines.h          |  128
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c          |   83
-rw-r--r--  drivers/net/ethernet/intel/igbvf/igbvf.h            |  103
-rw-r--r--  drivers/net/ethernet/intel/igbvf/mbx.c              |   21
-rw-r--r--  drivers/net/ethernet/intel/igbvf/mbx.h              |   53
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c           |  391
-rw-r--r--  drivers/net/ethernet/intel/igbvf/regs.h             |  123
-rw-r--r--  drivers/net/ethernet/intel/igbvf/vf.c               |   43
-rw-r--r--  drivers/net/ethernet/intel/igbvf/vf.h               |   83
38 files changed, 2157 insertions, 1032 deletions
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index bb7ab3c321d6..0570c668ec3d 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -141,6 +141,7 @@
 #define E1000_RCTL_LBM_TCVR   0x000000C0 /* tcvr loopback mode */
 #define E1000_RCTL_DTYP_PS    0x00000400 /* Packet Split descriptor */
 #define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
+#define E1000_RCTL_RDMTS_HEX  0x00010000
 #define E1000_RCTL_MO_SHIFT   12         /* multicast offset shift */
 #define E1000_RCTL_MO_3       0x00003000 /* multicast offset 15:4 */
 #define E1000_RCTL_BAM        0x00008000 /* broadcast enable */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 9416e5a7e0c8..a69f09e37b58 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -132,6 +132,7 @@ enum e1000_boards {
     board_pchlan,
     board_pch2lan,
     board_pch_lpt,
+    board_pch_spt
 };
 
 struct e1000_ps_page {
@@ -501,6 +502,7 @@ extern const struct e1000_info e1000_ich10_info;
 extern const struct e1000_info e1000_pch_info;
 extern const struct e1000_info e1000_pch2_info;
 extern const struct e1000_info e1000_pch_lpt_info;
+extern const struct e1000_info e1000_pch_spt_info;
 extern const struct e1000_info e1000_es2_info;
 
 void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 865ce45f9ec3..11f486e4ff7b 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -896,18 +896,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
     case e1000_pchlan:
     case e1000_pch2lan:
     case e1000_pch_lpt:
+    case e1000_pch_spt:
         mask |= (1 << 18);
         break;
     default:
         break;
     }
 
-    if (mac->type == e1000_pch_lpt)
+    if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt))
         wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
             E1000_FWSM_WLOCK_MAC_SHIFT;
 
     for (i = 0; i < mac->rar_entry_count; i++) {
-        if (mac->type == e1000_pch_lpt) {
+        if ((mac->type == e1000_pch_lpt) ||
+            (mac->type == e1000_pch_spt)) {
             /* Cannot test write-protected SHRAL[n] registers */
             if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
                 continue;
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 72f5475c4b90..19e8c487db06 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -87,6 +87,10 @@ struct e1000_hw;
 #define E1000_DEV_ID_PCH_I218_V2       0x15A1
 #define E1000_DEV_ID_PCH_I218_LM3      0x15A2 /* Wildcat Point PCH */
 #define E1000_DEV_ID_PCH_I218_V3       0x15A3 /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM   0x156F /* SPT PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V    0x1570 /* SPT PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM2  0x15B7 /* SPT-H PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V2   0x15B8 /* SPT-H PCH */
 
 #define E1000_REVISION_4 4
 
@@ -108,6 +112,7 @@ enum e1000_mac_type {
     e1000_pchlan,
     e1000_pch2lan,
     e1000_pch_lpt,
+    e1000_pch_spt,
 };
 
 enum e1000_media_type {
@@ -153,6 +158,7 @@ enum e1000_bus_width {
     e1000_bus_width_pcie_x1,
     e1000_bus_width_pcie_x2,
     e1000_bus_width_pcie_x4 = 4,
+    e1000_bus_width_pcie_x8 = 8,
     e1000_bus_width_32,
     e1000_bus_width_64,
     e1000_bus_width_reserved
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 48b74a549155..7523f510c7e4 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -123,6 +123,14 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
                                          u16 *data);
 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                                          u8 size, u16 *data);
+static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+                                           u32 *data);
+static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
+                                          u32 offset, u32 *data);
+static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
+                                            u32 offset, u32 data);
+static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+                                                 u32 offset, u32 dword);
 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
@@ -229,7 +237,8 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
     if (ret_val)
         return false;
 out:
-    if (hw->mac.type == e1000_pch_lpt) {
+    if ((hw->mac.type == e1000_pch_lpt) ||
+        (hw->mac.type == e1000_pch_spt)) {
         /* Unforce SMBus mode in PHY */
         e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
@@ -321,6 +330,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
      */
     switch (hw->mac.type) {
     case e1000_pch_lpt:
+    case e1000_pch_spt:
         if (e1000_phy_is_accessible_pchlan(hw))
             break;
 
@@ -461,6 +471,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
         /* fall-through */
     case e1000_pch2lan:
     case e1000_pch_lpt:
+    case e1000_pch_spt:
         /* In case the PHY needs to be in mdio slow mode,
          * set slow mode and try to get the PHY id again.
          */
@@ -590,35 +601,50 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
     struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
     u32 gfpreg, sector_base_addr, sector_end_addr;
     u16 i;
+    u32 nvm_size;
 
     /* Can't read flash registers if the register set isn't mapped. */
-    if (!hw->flash_address) {
-        e_dbg("ERROR: Flash registers not mapped\n");
-        return -E1000_ERR_CONFIG;
-    }
-
     nvm->type = e1000_nvm_flash_sw;
+    /* in SPT, gfpreg doesn't exist. NVM size is taken from the
+     * STRAP register
+     */
+    if (hw->mac.type == e1000_pch_spt) {
+        nvm->flash_base_addr = 0;
+        nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
+            * NVM_SIZE_MULTIPLIER;
+        nvm->flash_bank_size = nvm_size / 2;
+        /* Adjust to word count */
+        nvm->flash_bank_size /= sizeof(u16);
+        /* Set the base address for flash register access */
+        hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
+    } else {
+        if (!hw->flash_address) {
+            e_dbg("ERROR: Flash registers not mapped\n");
+            return -E1000_ERR_CONFIG;
+        }
 
         gfpreg = er32flash(ICH_FLASH_GFPREG);
 
         /* sector_X_addr is a "sector"-aligned address (4096 bytes)
          * Add 1 to sector_end_addr since this sector is included in
          * the overall size.
          */
         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
 
         /* flash_base_addr is byte-aligned */
-        nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
+        nvm->flash_base_addr = sector_base_addr
+            << FLASH_SECTOR_ADDR_SHIFT;
 
         /* find total size of the NVM, then cut in half since the total
          * size represents two separate NVM banks.
          */
         nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
                                 << FLASH_SECTOR_ADDR_SHIFT);
         nvm->flash_bank_size /= 2;
         /* Adjust to word count */
         nvm->flash_bank_size /= sizeof(u16);
+    }
 
     nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
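To make the SPT sizing path above concrete: the NVMS field is bits 5:1 of STRAP, and NVM_SIZE_MULTIPLIER is 4096 (defined in ich8lan.h further down). A worked example with a hypothetical strap value:

    u32 strap_reg = 0x0000001E;                            /* hypothetical: NVMS field (bits 5:1) = 0x0F */
    u32 nvm_size = (((strap_reg >> 1) & 0x1F) + 1) * 4096; /* 16 * 4096 = 65536 bytes */
    u32 bank_words = (nvm_size / 2) / sizeof(u16);         /* two banks of 32768 bytes = 16384 words each */

So a 64 KB part yields two 16K-word banks, mirroring the flash_bank_size arithmetic in the hunk.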
@@ -682,6 +708,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
         mac->ops.rar_set = e1000_rar_set_pch2lan;
         /* fall-through */
     case e1000_pch_lpt:
+    case e1000_pch_spt:
     case e1000_pchlan:
         /* check management mode */
         mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -699,7 +726,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
         break;
     }
 
-    if (mac->type == e1000_pch_lpt) {
+    if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) {
         mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
         mac->ops.rar_set = e1000_rar_set_pch_lpt;
         mac->ops.setup_physical_interface =
@@ -919,8 +946,9 @@ release:
     /* clear FEXTNVM6 bit 8 on link down or 10/100 */
     fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
 
-    if (!link || ((status & E1000_STATUS_SPEED_100) &&
-                  (status & E1000_STATUS_FD)))
+    if ((hw->phy.revision > 5) || !link ||
+        ((status & E1000_STATUS_SPEED_100) &&
+         (status & E1000_STATUS_FD)))
         goto update_fextnvm6;
 
     ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
@@ -1100,6 +1128,21 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
     if (ret_val)
         goto out;
 
+    /* Si workaround for ULP entry flow on i127/rev6 h/w.  Enable
+     * LPLU and disable Gig speed when entering ULP
+     */
+    if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
+        ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
+                                               &phy_reg);
+        if (ret_val)
+            goto release;
+        phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
+        ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
+                                                phy_reg);
+        if (ret_val)
+            goto release;
+    }
+
     /* Force SMBus mode in PHY */
     ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
     if (ret_val)
@@ -1302,7 +1345,8 @@ out:
 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 {
     struct e1000_mac_info *mac = &hw->mac;
-    s32 ret_val;
+    s32 ret_val, tipg_reg = 0;
+    u16 emi_addr, emi_val = 0;
     bool link;
     u16 phy_reg;
 
@@ -1333,48 +1377,55 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
      * the IPG and reduce Rx latency in the PHY.
      */
     if (((hw->mac.type == e1000_pch2lan) ||
-         (hw->mac.type == e1000_pch_lpt)) && link) {
+         (hw->mac.type == e1000_pch_lpt) ||
+         (hw->mac.type == e1000_pch_spt)) && link) {
         u32 reg;
 
         reg = er32(STATUS);
+        tipg_reg = er32(TIPG);
+        tipg_reg &= ~E1000_TIPG_IPGT_MASK;
+
         if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
-            u16 emi_addr;
+            tipg_reg |= 0xFF;
+            /* Reduce Rx latency in analog PHY */
+            emi_val = 0;
+        } else {
 
-            reg = er32(TIPG);
-            reg &= ~E1000_TIPG_IPGT_MASK;
-            reg |= 0xFF;
-            ew32(TIPG, reg);
+            /* Roll back the default values */
+            tipg_reg |= 0x08;
+            emi_val = 1;
+        }
 
-            /* Reduce Rx latency in analog PHY */
-            ret_val = hw->phy.ops.acquire(hw);
-            if (ret_val)
-                return ret_val;
+        ew32(TIPG, tipg_reg);
 
-            if (hw->mac.type == e1000_pch2lan)
-                emi_addr = I82579_RX_CONFIG;
-            else
-                emi_addr = I217_RX_CONFIG;
+        ret_val = hw->phy.ops.acquire(hw);
+        if (ret_val)
+            return ret_val;
 
-            ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
+        if (hw->mac.type == e1000_pch2lan)
+            emi_addr = I82579_RX_CONFIG;
+        else
+            emi_addr = I217_RX_CONFIG;
+        ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
 
         hw->phy.ops.release(hw);
 
         if (ret_val)
             return ret_val;
-        }
     }
 
     /* Work-around I218 hang issue */
     if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
         (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
         (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
-        (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
+        (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) ||
+        (hw->mac.type == e1000_pch_spt)) {
         ret_val = e1000_k1_workaround_lpt_lp(hw, link);
         if (ret_val)
             return ret_val;
     }
-
-    if (hw->mac.type == e1000_pch_lpt) {
+    if ((hw->mac.type == e1000_pch_lpt) ||
+        (hw->mac.type == e1000_pch_spt)) {
         /* Set platform power management values for
          * Latency Tolerance Reporting (LTR)
          */
@@ -1386,6 +1437,19 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
     /* Clear link partner's EEE ability */
     hw->dev_spec.ich8lan.eee_lp_ability = 0;
 
+    /* FEXTNVM6 K1-off workaround */
+    if (hw->mac.type == e1000_pch_spt) {
+        u32 pcieanacfg = er32(PCIEANACFG);
+        u32 fextnvm6 = er32(FEXTNVM6);
+
+        if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
+            fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
+        else
+            fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+
+        ew32(FEXTNVM6, fextnvm6);
+    }
+
     if (!link)
         return 0;	/* No link detected */
 
@@ -1479,6 +1543,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
     case e1000_pchlan:
     case e1000_pch2lan:
     case e1000_pch_lpt:
+    case e1000_pch_spt:
         rc = e1000_init_phy_params_pchlan(hw);
         break;
     default:
@@ -1929,6 +1994,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
     case e1000_pchlan:
     case e1000_pch2lan:
     case e1000_pch_lpt:
+    case e1000_pch_spt:
         sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
         break;
     default:
@@ -2961,6 +3027,20 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
     s32 ret_val;
 
     switch (hw->mac.type) {
+        /* In SPT, read from the CTRL_EXT reg instead of
+         * accessing the sector valid bits from the nvm
+         */
+    case e1000_pch_spt:
+        *bank = er32(CTRL_EXT)
+            & E1000_CTRL_EXT_NVMVS;
+        if ((*bank == 0) || (*bank == 1)) {
+            e_dbg("ERROR: No valid NVM bank present\n");
+            return -E1000_ERR_NVM;
+        } else {
+            *bank = *bank - 2;
+            return 0;
+        }
+        break;
     case e1000_ich8lan:
     case e1000_ich9lan:
         eecd = er32(EECD);
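A compact restatement of the SPT bank decode above, which is easy to misread in diff form: the two-bit NVM-valid-sector field encodes "no valid bank" as 0 or 1, and banks 0/1 as 2/3. A sketch under that assumption, using the names from the hunk:

    u32 nvmvs = er32(CTRL_EXT) & E1000_CTRL_EXT_NVMVS; /* 2-bit field, values 0..3 */
    if (nvmvs < 2)
        return -E1000_ERR_NVM; /* 0 or 1: no valid NVM bank present */
    *bank = nvmvs - 2;         /* 2 -> bank 0, 3 -> bank 1 */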
@@ -3008,6 +3088,99 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
 }
 
 /**
+ * e1000_read_nvm_spt - NVM access for SPT
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to read.
+ * @words: Size of data to read in words.
+ * @data: pointer to the word(s) to read at offset.
+ *
+ * Reads a word(s) from the NVM
+ **/
+static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
+                              u16 *data)
+{
+    struct e1000_nvm_info *nvm = &hw->nvm;
+    struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+    u32 act_offset;
+    s32 ret_val = 0;
+    u32 bank = 0;
+    u32 dword = 0;
+    u16 offset_to_read;
+    u16 i;
+
+    if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+        (words == 0)) {
+        e_dbg("nvm parameter(s) out of bounds\n");
+        ret_val = -E1000_ERR_NVM;
+        goto out;
+    }
+
+    nvm->ops.acquire(hw);
+
+    ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+    if (ret_val) {
+        e_dbg("Could not detect valid bank, assuming bank 0\n");
+        bank = 0;
+    }
+
+    act_offset = (bank) ? nvm->flash_bank_size : 0;
+    act_offset += offset;
+
+    ret_val = 0;
+
+    for (i = 0; i < words; i += 2) {
+        if (words - i == 1) {
+            if (dev_spec->shadow_ram[offset + i].modified) {
+                data[i] =
+                    dev_spec->shadow_ram[offset + i].value;
+            } else {
+                offset_to_read = act_offset + i -
+                    ((act_offset + i) % 2);
+                ret_val =
+                    e1000_read_flash_dword_ich8lan(hw,
+                                                   offset_to_read,
+                                                   &dword);
+                if (ret_val)
+                    break;
+                if ((act_offset + i) % 2 == 0)
+                    data[i] = (u16)(dword & 0xFFFF);
+                else
+                    data[i] = (u16)((dword >> 16) & 0xFFFF);
+            }
+        } else {
+            offset_to_read = act_offset + i;
+            if (!(dev_spec->shadow_ram[offset + i].modified) ||
+                !(dev_spec->shadow_ram[offset + i + 1].modified)) {
+                ret_val =
+                    e1000_read_flash_dword_ich8lan(hw,
+                                                   offset_to_read,
+                                                   &dword);
+                if (ret_val)
+                    break;
+            }
+            if (dev_spec->shadow_ram[offset + i].modified)
+                data[i] =
+                    dev_spec->shadow_ram[offset + i].value;
+            else
+                data[i] = (u16)(dword & 0xFFFF);
+            if (dev_spec->shadow_ram[offset + i + 1].modified)
+                data[i + 1] =
+                    dev_spec->shadow_ram[offset + i + 1].value;
+            else
+                data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
+        }
+    }
+
+    nvm->ops.release(hw);
+
+out:
+    if (ret_val)
+        e_dbg("NVM read error: %d\n", ret_val);
+
+    return ret_val;
+}
+
+/**
  * e1000_read_nvm_ich8lan - Read word(s) from the NVM
  * @hw: pointer to the HW structure
  * @offset: The offset (in bytes) of the word(s) to read.
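The word-packing convention e1000_read_nvm_spt() relies on: each 32-bit flash read returns two adjacent NVM words, the even-offset word in the low half and the odd-offset word in the high half. A minimal sketch of the split (illustrative value):

    u32 dword = 0xBEEFCAFE;                          /* dword fetched from flash (illustrative) */
    u16 even_word = (u16)(dword & 0xFFFF);           /* 0xCAFE: word at the even offset */
    u16 odd_word  = (u16)((dword >> 16) & 0xFFFF);   /* 0xBEEF: word at the odd offset */

This is also why a lone trailing word forces offset_to_read to be rounded down to an even boundary before the dword read.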
@@ -3090,8 +3263,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
     /* Clear FCERR and DAEL in hw status by writing 1 */
     hsfsts.hsf_status.flcerr = 1;
     hsfsts.hsf_status.dael = 1;
-
-    ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+    if (hw->mac.type == e1000_pch_spt)
+        ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
+    else
+        ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 
     /* Either we should have a hardware SPI cycle in progress
      * bit to check against, in order to start a new cycle or
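The 16-vs-32-bit register split above recurs throughout this patch: on SPT the flash registers sit in LAN memory space, only 32-bit access is allowed, and the 16-bit HSFCTL occupies the high half of the dword at the HSFSTS offset. A sketch of the packing the code assumes (only meaningful inside driver context, where er32flash()/ew32flash() and a local hw are available):

    u32 combined = er32flash(ICH_FLASH_HSFSTS);      /* [31:16] HSFCTL, [15:0] HSFSTS */
    u16 hsfctl = (u16)(combined >> 16);              /* the ">> 16" reads seen in this patch */
    ew32flash(ICH_FLASH_HSFSTS, (u32)hsfctl << 16);  /* the "<< 16" writes: the status half is
                                                      * written as zeros, which leaves its
                                                      * write-one-to-clear bits untouched */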
@@ -3107,7 +3282,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
          * Begin by setting Flash Cycle Done.
          */
         hsfsts.hsf_status.flcdone = 1;
-        ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+        if (hw->mac.type == e1000_pch_spt)
+            ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
+        else
+            ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
         ret_val = 0;
     } else {
         s32 i;
@@ -3128,7 +3306,11 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
              * now set the Flash Cycle Done.
              */
             hsfsts.hsf_status.flcdone = 1;
-            ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+            if (hw->mac.type == e1000_pch_spt)
+                ew32flash(ICH_FLASH_HSFSTS,
+                          hsfsts.regval & 0xFFFF);
+            else
+                ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
         } else {
             e_dbg("Flash controller busy, cannot get access\n");
         }
@@ -3151,9 +3333,16 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
     u32 i = 0;
 
     /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
-    hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+    if (hw->mac.type == e1000_pch_spt)
+        hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+    else
+        hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
     hsflctl.hsf_ctrl.flcgo = 1;
-    ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+    if (hw->mac.type == e1000_pch_spt)
+        ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+    else
+        ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
 
     /* wait till FDONE bit is set to 1 */
     do {
@@ -3170,6 +3359,23 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
 }
 
 /**
+ * e1000_read_flash_dword_ich8lan - Read dword from flash
+ * @hw: pointer to the HW structure
+ * @offset: offset to data location
+ * @data: pointer to the location for storing the data
+ *
+ * Reads the flash dword at offset into data.  Offset is converted
+ * to bytes before read.
+ **/
+static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
+                                          u32 *data)
+{
+    /* Must convert word offset into bytes. */
+    offset <<= 1;
+    return e1000_read_flash_data32_ich8lan(hw, offset, data);
+}
+
+/**
  * e1000_read_flash_word_ich8lan - Read word from flash
  * @hw: pointer to the HW structure
  * @offset: offset to data location
@@ -3201,7 +3407,14 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
     s32 ret_val;
     u16 word = 0;
 
-    ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+    /* In SPT, only 32 bits access is supported,
+     * so this function should not be called.
+     */
+    if (hw->mac.type == e1000_pch_spt)
+        return -E1000_ERR_NVM;
+    else
+        ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+
     if (ret_val)
         return ret_val;
 
@@ -3287,6 +3500,82 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 }
 
 /**
+ * e1000_read_flash_data32_ich8lan - Read dword from NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the dword to read.
+ * @data: Pointer to the dword to store the value read.
+ *
+ * Reads a byte or word from the NVM using the flash access registers.
+ **/
+
+static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+                                           u32 *data)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    u32 flash_linear_addr;
+    s32 ret_val = -E1000_ERR_NVM;
+    u8 count = 0;
+
+    if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
+        hw->mac.type != e1000_pch_spt)
+        return -E1000_ERR_NVM;
+    flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+                         hw->nvm.flash_base_addr);
+
+    do {
+        udelay(1);
+        /* Steps */
+        ret_val = e1000_flash_cycle_init_ich8lan(hw);
+        if (ret_val)
+            break;
+        /* In SPT, This register is in Lan memory space, not flash.
+         * Therefore, only 32 bit access is supported
+         */
+        hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+
+        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+        hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+        /* In SPT, This register is in Lan memory space, not flash.
+         * Therefore, only 32 bit access is supported
+         */
+        ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16);
+        ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+        ret_val =
+            e1000_flash_cycle_ich8lan(hw,
+                                      ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+        /* Check if FCERR is set to 1, if set to 1, clear it
+         * and try the whole sequence a few more times, else
+         * read in (shift in) the Flash Data0, the order is
+         * least significant byte first msb to lsb
+         */
+        if (!ret_val) {
+            *data = er32flash(ICH_FLASH_FDATA0);
+            break;
+        } else {
+            /* If we've gotten here, then things are probably
+             * completely hosed, but if the error condition is
+             * detected, it won't hurt to give it another try...
+             * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+             */
+            hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcerr) {
+                /* Repeat for some time before giving up. */
+                continue;
+            } else if (!hsfsts.hsf_status.flcdone) {
+                e_dbg("Timeout error - flash cycle did not complete.\n");
+                break;
+            }
+        }
+    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+    return ret_val;
+}
+
+/**
  * e1000_write_nvm_ich8lan - Write word(s) to the NVM
  * @hw: pointer to the HW structure
  * @offset: The offset (in bytes) of the word(s) to write.
@@ -3321,7 +3610,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 }
 
 /**
- * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ * e1000_update_nvm_checksum_spt - Update the checksum for NVM
  * @hw: pointer to the HW structure
  *
  * The NVM checksum is updated by calling the generic update_nvm_checksum,
@@ -3331,13 +3620,13 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
  * After a successful commit, the shadow ram is cleared and is ready for
  * future writes.
  **/
-static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
 {
     struct e1000_nvm_info *nvm = &hw->nvm;
     struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
     u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
     s32 ret_val;
-    u16 data;
+    u32 dword = 0;
 
     ret_val = e1000e_update_nvm_checksum_generic(hw);
     if (ret_val)
@@ -3371,12 +3660,175 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
         if (ret_val)
             goto release;
     }
-
-    for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+    for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) {
         /* Determine whether to write the value stored
          * in the other NVM bank or a modified value stored
          * in the shadow RAM
          */
+        ret_val = e1000_read_flash_dword_ich8lan(hw,
+                                                 i + old_bank_offset,
+                                                 &dword);
+
+        if (dev_spec->shadow_ram[i].modified) {
+            dword &= 0xffff0000;
+            dword |= (dev_spec->shadow_ram[i].value & 0xffff);
+        }
+        if (dev_spec->shadow_ram[i + 1].modified) {
+            dword &= 0x0000ffff;
+            dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
+                      << 16);
+        }
+        if (ret_val)
+            break;
+
+        /* If the word is 0x13, then make sure the signature bits
+         * (15:14) are 11b until the commit has completed.
+         * This will allow us to write 10b which indicates the
+         * signature is valid.  We want to do this after the write
+         * has completed so that we don't mark the segment valid
+         * while the write is still in progress
+         */
+        if (i == E1000_ICH_NVM_SIG_WORD - 1)
+            dword |= E1000_ICH_NVM_SIG_MASK << 16;
+
+        /* Convert offset to bytes. */
+        act_offset = (i + new_bank_offset) << 1;
+
+        usleep_range(100, 200);
+
+        /* Write the data to the new bank. Offset in words */
+        act_offset = i + new_bank_offset;
+        ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
+                                                        dword);
+        if (ret_val)
+            break;
+    }
+
+    /* Don't bother writing the segment valid bits if sector
+     * programming failed.
+     */
+    if (ret_val) {
+        /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
+        e_dbg("Flash commit failed.\n");
+        goto release;
+    }
+
+    /* Finally validate the new segment by setting bit 15:14
+     * to 10b in word 0x13 , this can be done without an
+     * erase as well since these bits are 11 to start with
+     * and we need to change bit 14 to 0b
+     */
+    act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+
+    /*offset in words but we read dword */
+    --act_offset;
+    ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+    if (ret_val)
+        goto release;
+
+    dword &= 0xBFFFFFFF;
+    ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+    if (ret_val)
+        goto release;
+
+    /* And invalidate the previously valid segment by setting
+     * its signature word (0x13) high_byte to 0b. This can be
+     * done without an erase because flash erase sets all bits
+     * to 1's. We can write 1's to 0's without an erase
+     */
+    act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+
+    /* offset in words but we read dword */
+    act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
+    ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+    if (ret_val)
+        goto release;
+
+    dword &= 0x00FFFFFF;
+    ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+    if (ret_val)
+        goto release;
+
+    /* Great!  Everything worked, we can now clear the cached entries. */
+    for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+        dev_spec->shadow_ram[i].modified = false;
+        dev_spec->shadow_ram[i].value = 0xFFFF;
+    }
+
+release:
+    nvm->ops.release(hw);
+
+    /* Reload the EEPROM, or else modifications will not appear
+     * until after the next adapter reset.
+     */
+    if (!ret_val) {
+        nvm->ops.reload(hw);
+        usleep_range(10000, 20000);
+    }
+
+out:
+    if (ret_val)
+        e_dbg("NVM update error: %d\n", ret_val);
+
+    return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ * @hw: pointer to the HW structure
+ *
+ * The NVM checksum is updated by calling the generic update_nvm_checksum,
+ * which writes the checksum to the shadow ram.  The changes in the shadow
+ * ram are then committed to the EEPROM by processing each bank at a time
+ * checking for the modified bit and writing only the pending changes.
+ * After a successful commit, the shadow ram is cleared and is ready for
+ * future writes.
+ **/
+static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+    struct e1000_nvm_info *nvm = &hw->nvm;
+    struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+    u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+    s32 ret_val;
+    u16 data = 0;
+
+    ret_val = e1000e_update_nvm_checksum_generic(hw);
+    if (ret_val)
+        goto out;
+
+    if (nvm->type != e1000_nvm_flash_sw)
+        goto out;
+
+    nvm->ops.acquire(hw);
+
+    /* We're writing to the opposite bank so if we're on bank 1,
+     * write to bank 0 etc.  We also need to erase the segment that
+     * is going to be written
+     */
+    ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+    if (ret_val) {
+        e_dbg("Could not detect valid bank, assuming bank 0\n");
+        bank = 0;
+    }
+
+    if (bank == 0) {
+        new_bank_offset = nvm->flash_bank_size;
+        old_bank_offset = 0;
+        ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+        if (ret_val)
+            goto release;
+    } else {
+        old_bank_offset = nvm->flash_bank_size;
+        new_bank_offset = 0;
+        ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+        if (ret_val)
+            goto release;
+    }
+    for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
         if (dev_spec->shadow_ram[i].modified) {
             data = dev_spec->shadow_ram[i].value;
         } else {
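The signature arithmetic in e1000_update_nvm_checksum_spt() above, restated: the bank-valid signature lives in bits 15:14 of word 0x13; erased flash reads 11b, 10b marks the bank valid, and clearing the high byte invalidates it. Because the dword reads here place word 0x13 in the high half, the masks act on the top 16 bits. A sketch:

    u32 dword = 0xFFFFFFFF;  /* freshly erased flash: all ones, signature = 11b */
    dword &= 0xBFFFFFFF;     /* clear bit 30 (bit 14 of the high word): 11b -> 10b, bank valid */
    dword &= 0x00FFFFFF;     /* old bank: zero the signature word's high byte -> invalid */

Both operations only flip 1s to 0s, which is why no erase cycle is needed.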
@@ -3498,6 +3950,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
      */
     switch (hw->mac.type) {
     case e1000_pch_lpt:
+    case e1000_pch_spt:
         word = NVM_COMPAT;
         valid_csum_mask = NVM_COMPAT_VALID_CSUM;
         break;
@@ -3583,9 +4036,13 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
     s32 ret_val;
     u8 count = 0;
 
-    if (size < 1 || size > 2 || data > size * 0xff ||
-        offset > ICH_FLASH_LINEAR_ADDR_MASK)
-        return -E1000_ERR_NVM;
+    if (hw->mac.type == e1000_pch_spt) {
+        if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+            return -E1000_ERR_NVM;
+    } else {
+        if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+            return -E1000_ERR_NVM;
+    }
 
     flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
                          hw->nvm.flash_base_addr);
@@ -3596,12 +4053,25 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
         ret_val = e1000_flash_cycle_init_ich8lan(hw);
         if (ret_val)
             break;
+        /* In SPT, This register is in Lan memory space, not
+         * flash.  Therefore, only 32 bit access is supported
+         */
+        if (hw->mac.type == e1000_pch_spt)
+            hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+        else
+            hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
 
-        hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
         /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
         hsflctl.hsf_ctrl.fldbcount = size - 1;
         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
-        ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+        /* In SPT, This register is in Lan memory space,
+         * not flash.  Therefore, only 32 bit access is
+         * supported
+         */
+        if (hw->mac.type == e1000_pch_spt)
+            ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+        else
+            ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
 
         ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
 
@@ -3640,6 +4110,90 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 }
 
 /**
+ * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the dwords to read.
+ * @data: The 4 bytes to write to the NVM.
+ *
+ * Writes one/two/four bytes to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+                                            u32 data)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    u32 flash_linear_addr;
+    s32 ret_val;
+    u8 count = 0;
+
+    if (hw->mac.type == e1000_pch_spt) {
+        if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
+            return -E1000_ERR_NVM;
+    }
+    flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+                         hw->nvm.flash_base_addr);
+    do {
+        udelay(1);
+        /* Steps */
+        ret_val = e1000_flash_cycle_init_ich8lan(hw);
+        if (ret_val)
+            break;
+
+        /* In SPT, This register is in Lan memory space, not
+         * flash.  Therefore, only 32 bit access is supported
+         */
+        if (hw->mac.type == e1000_pch_spt)
+            hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
+                >> 16;
+        else
+            hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+
+        hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+
+        /* In SPT, This register is in Lan memory space,
+         * not flash.  Therefore, only 32 bit access is
+         * supported
+         */
+        if (hw->mac.type == e1000_pch_spt)
+            ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+        else
+            ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+        ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+        ew32flash(ICH_FLASH_FDATA0, data);
+
+        /* check if FCERR is set to 1 , if set to 1, clear it
+         * and try the whole sequence a few more times else done
+         */
+        ret_val =
+            e1000_flash_cycle_ich8lan(hw,
+                                      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+
+        if (!ret_val)
+            break;
+
+        /* If we're here, then things are most likely
+         * completely hosed, but if the error condition
+         * is detected, it won't hurt to give it another
+         * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+         */
+        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+
+        if (hsfsts.hsf_status.flcerr)
+            /* Repeat for some time before giving up. */
+            continue;
+        if (!hsfsts.hsf_status.flcdone) {
+            e_dbg("Timeout error - flash cycle did not complete.\n");
+            break;
+        }
+    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+    return ret_val;
+}
+
+/**
  * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
  * @hw: pointer to the HW structure
  * @offset: The index of the byte to read.
@@ -3656,6 +4210,40 @@ static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
 }
 
 /**
+ * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset of the word to write.
+ * @dword: The dword to write to the NVM.
+ *
+ * Writes a single dword to the NVM using the flash access registers.
+ * Goes through a retry algorithm before giving up.
+ **/
+static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+                                                 u32 offset, u32 dword)
+{
+    s32 ret_val;
+    u16 program_retries;
+
+    /* Must convert word offset into bytes. */
+    offset <<= 1;
+    ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+
+    if (!ret_val)
+        return ret_val;
+    for (program_retries = 0; program_retries < 100; program_retries++) {
+        e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset);
+        usleep_range(100, 200);
+        ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+        if (!ret_val)
+            break;
+    }
+    if (program_retries == 100)
+        return -E1000_ERR_NVM;
+
+    return 0;
+}
+
+/**
  * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
  * @hw: pointer to the HW structure
  * @offset: The offset of the byte to write.
3661 * @offset: The offset of the byte to write. 4249 * @offset: The offset of the byte to write.
@@ -3759,9 +4347,18 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
             /* Write a value 11 (block Erase) in Flash
              * Cycle field in hw flash control
              */
-            hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+            if (hw->mac.type == e1000_pch_spt)
+                hsflctl.regval =
+                    er32flash(ICH_FLASH_HSFSTS) >> 16;
+            else
+                hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+
             hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
-            ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+            if (hw->mac.type == e1000_pch_spt)
+                ew32flash(ICH_FLASH_HSFSTS,
+                          hsflctl.regval << 16);
+            else
+                ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
 
             /* Write the last 24 bits of an index within the
              * block into Flash Linear address field in Flash
@@ -4180,7 +4777,8 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
     ew32(RFCTL, reg);
 
     /* Enable ECC on Lynxpoint */
-    if (hw->mac.type == e1000_pch_lpt) {
+    if ((hw->mac.type == e1000_pch_lpt) ||
+        (hw->mac.type == e1000_pch_spt)) {
         reg = er32(PBECCSTS);
         reg |= E1000_PBECCSTS_ECC_ENABLE;
         ew32(PBECCSTS, reg);
@@ -4583,7 +5181,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
     if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
         (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
         (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
-        (device_id == E1000_DEV_ID_PCH_I218_V3)) {
+        (device_id == E1000_DEV_ID_PCH_I218_V3) ||
+        (hw->mac.type == e1000_pch_spt)) {
         u32 fextnvm6 = er32(FEXTNVM6);
 
         ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
@@ -5058,6 +5657,17 @@ static const struct e1000_nvm_operations ich8_nvm_ops = {
     .write = e1000_write_nvm_ich8lan,
 };
 
+static const struct e1000_nvm_operations spt_nvm_ops = {
+    .acquire = e1000_acquire_nvm_ich8lan,
+    .release = e1000_release_nvm_ich8lan,
+    .read = e1000_read_nvm_spt,
+    .update = e1000_update_nvm_checksum_spt,
+    .reload = e1000e_reload_nvm_generic,
+    .valid_led_default = e1000_valid_led_default_ich8lan,
+    .validate = e1000_validate_nvm_checksum_ich8lan,
+    .write = e1000_write_nvm_ich8lan,
+};
+
 const struct e1000_info e1000_ich8_info = {
     .mac = e1000_ich8lan,
     .flags = FLAG_HAS_WOL
@@ -5166,3 +5776,23 @@ const struct e1000_info e1000_pch_lpt_info = {
     .phy_ops = &ich8_phy_ops,
     .nvm_ops = &ich8_nvm_ops,
 };
+
+const struct e1000_info e1000_pch_spt_info = {
+    .mac = e1000_pch_spt,
+    .flags = FLAG_IS_ICH
+             | FLAG_HAS_WOL
+             | FLAG_HAS_HW_TIMESTAMP
+             | FLAG_HAS_CTRLEXT_ON_LOAD
+             | FLAG_HAS_AMT
+             | FLAG_HAS_FLASH
+             | FLAG_HAS_JUMBO_FRAMES
+             | FLAG_APME_IN_WUC,
+    .flags2 = FLAG2_HAS_PHY_STATS
+              | FLAG2_HAS_EEE,
+    .pba = 26,
+    .max_hw_frame_size = 9018,
+    .get_variants = e1000_get_variants_ich8lan,
+    .mac_ops = &ich8_mac_ops,
+    .phy_ops = &ich8_phy_ops,
+    .nvm_ops = &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 8066a498eaac..770a573b9eea 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -95,9 +95,18 @@
 
 #define E1000_FEXTNVM6_REQ_PLL_CLK                0x00000100
 #define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION  0x00000200
+#define E1000_FEXTNVM6_K1_OFF_ENABLE              0x80000000
+/* bit for disabling packet buffer read */
+#define E1000_FEXTNVM7_DISABLE_PB_READ            0x00040000
 
 #define E1000_FEXTNVM7_DISABLE_SMB_PERST          0x00000020
 
+#define K1_ENTRY_LATENCY       0
+#define K1_MIN_TIME            1
+#define NVM_SIZE_MULTIPLIER    4096   /* multiplier for NVMS field */
+#define E1000_FLASH_BASE_ADDR  0xE000 /* offset of NVM access regs */
+#define E1000_CTRL_EXT_NVMVS   0x3    /* NVM valid sector */
+
 #define PCIE_ICH8_SNOOP_ALL    PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES  7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 1e8c40fd5c3d..6fa4fc05709e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -70,6 +70,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
     [board_pchlan] = &e1000_pch_info,
     [board_pch2lan] = &e1000_pch2_info,
     [board_pch_lpt] = &e1000_pch_lpt_info,
+    [board_pch_spt] = &e1000_pch_spt_info,
 };
 
 struct e1000_reg_info {
@@ -1796,7 +1797,8 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
     }
 
     /* Reset on uncorrectable ECC error */
-    if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+    if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
+                                    (hw->mac.type == e1000_pch_spt))) {
         u32 pbeccsts = er32(PBECCSTS);
 
         adapter->corr_errors +=
@@ -1876,7 +1878,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
     }
 
     /* Reset on uncorrectable ECC error */
-    if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+    if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
+                                    (hw->mac.type == e1000_pch_spt))) {
         u32 pbeccsts = er32(PBECCSTS);
 
         adapter->corr_errors +=
@@ -2257,7 +2260,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
     if (adapter->msix_entries) {
         ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
         ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
-    } else if (hw->mac.type == e1000_pch_lpt) {
+    } else if ((hw->mac.type == e1000_pch_lpt) ||
+               (hw->mac.type == e1000_pch_spt)) {
         ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
     } else {
         ew32(IMS, IMS_ENABLE_MASK);
@@ -3014,6 +3018,19 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
     ew32(TCTL, tctl);
 
     hw->mac.ops.config_collision_dist(hw);
+
+    /* SPT Si errata workaround to avoid data corruption */
+    if (hw->mac.type == e1000_pch_spt) {
+        u32 reg_val;
+
+        reg_val = er32(IOSFPC);
+        reg_val |= E1000_RCTL_RDMTS_HEX;
+        ew32(IOSFPC, reg_val);
+
+        reg_val = er32(TARC(0));
+        reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ;
+        ew32(TARC(0), reg_val);
+    }
 }
 
 /**
@@ -3490,8 +3507,11 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 incvalue, incperiod, shift;
 
-	/* Make sure clock is enabled on I217 before checking the frequency */
-	if ((hw->mac.type == e1000_pch_lpt) &&
+	/* Make sure clock is enabled on I217/I218/I219 before checking
+	 * the frequency
+	 */
+	if (((hw->mac.type == e1000_pch_lpt) ||
+	     (hw->mac.type == e1000_pch_spt)) &&
 	    !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
 	    !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
 		u32 fextnvm7 = er32(FEXTNVM7);
@@ -3505,10 +3525,13 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
 	switch (hw->mac.type) {
 	case e1000_pch2lan:
 	case e1000_pch_lpt:
-		/* On I217, the clock frequency is 25MHz or 96MHz as
-		 * indicated by the System Clock Frequency Indication
+	case e1000_pch_spt:
+		/* On I217, I218 and I219, the clock frequency is 25MHz
+		 * or 96MHz as indicated by the System Clock Frequency
+		 * Indication
 		 */
-		if ((hw->mac.type != e1000_pch_lpt) ||
+		if (((hw->mac.type != e1000_pch_lpt) &&
+		     (hw->mac.type != e1000_pch_spt)) ||
 		    (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
 			/* Stable 96MHz frequency */
 			incperiod = INCPERIOD_96MHz;
@@ -3875,6 +3898,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		break;
 	case e1000_pch2lan:
 	case e1000_pch_lpt:
+	case e1000_pch_spt:
 		fc->refresh_time = 0x0400;
 
 		if (adapter->netdev->mtu <= ETH_DATA_LEN) {
@@ -4759,7 +4783,8 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
 	adapter->stats.mgpdc += er32(MGTPDC);
 
 	/* Correctable ECC Errors */
-	if (hw->mac.type == e1000_pch_lpt) {
+	if ((hw->mac.type == e1000_pch_lpt) ||
+	    (hw->mac.type == e1000_pch_spt)) {
 		u32 pbeccsts = er32(PBECCSTS);
 
 		adapter->corr_errors +=
@@ -6144,7 +6169,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
 
 	if (adapter->hw.phy.type == e1000_phy_igp_3) {
 		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
-	} else if (hw->mac.type == e1000_pch_lpt) {
+	} else if ((hw->mac.type == e1000_pch_lpt) ||
+		   (hw->mac.type == e1000_pch_spt)) {
 		if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
 			/* ULP does not support wake from unicast, multicast
 			 * or broadcast.
@@ -7213,6 +7239,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt },
 
 	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
 };
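The netdev.c hunks above repeat the same two-type test in five places. A minimal sketch of how such a check can be collapsed into one predicate; e1000_is_pch_lpt_family() is a hypothetical helper name, not part of this patch:

/* Hypothetical helper (not in this patch): one predicate for the
 * repeated "LPT or SPT" MAC-type checks added throughout netdev.c.
 */
static inline bool e1000_is_pch_lpt_family(struct e1000_hw *hw)
{
	return (hw->mac.type == e1000_pch_lpt) ||
	       (hw->mac.type == e1000_pch_spt);
}

With such a helper, a call site like the ECC check would read: if ((icr & E1000_ICR_ECCER) && e1000_is_pch_lpt_family(hw)).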
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 978ef9c4a043..1490f1e8d6aa 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -221,7 +221,9 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
 	switch (hw->mac.type) {
 	case e1000_pch2lan:
 	case e1000_pch_lpt:
-		if ((hw->mac.type != e1000_pch_lpt) ||
+	case e1000_pch_spt:
+		if (((hw->mac.type != e1000_pch_lpt) &&
+		     (hw->mac.type != e1000_pch_spt)) ||
 		    (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
 			adapter->ptp_clock_info.max_adj = 24000000 - 1;
 			break;
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index ea235bbe50d3..85eefc4832ba 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -38,6 +38,7 @@
 #define E1000_FEXTNVM4	0x00024	/* Future Extended NVM 4 - RW */
 #define E1000_FEXTNVM6	0x00010	/* Future Extended NVM 6 - RW */
 #define E1000_FEXTNVM7	0x000E4	/* Future Extended NVM 7 - RW */
+#define E1000_PCIEANACFG	0x00F18	/* PCIE Analog Config */
 #define E1000_FCT	0x00030	/* Flow Control Type - RW */
 #define E1000_VET	0x00038	/* VLAN Ether Type - RW */
 #define E1000_ICR	0x000C0	/* Interrupt Cause Read - R/clr */
@@ -67,6 +68,7 @@
 #define E1000_PBA	0x01000	/* Packet Buffer Allocation - RW */
 #define E1000_PBS	0x01008	/* Packet Buffer Size */
 #define E1000_PBECCSTS	0x0100C	/* Packet Buffer ECC Status - RW */
+#define E1000_IOSFPC	0x00F28	/* TX corrupted data */
 #define E1000_EEMNGCTL	0x01010	/* MNG EEprom Control */
 #define E1000_EEWR	0x0102C	/* EEPROM Write Register - RW */
 #define E1000_FLOP	0x0103C	/* FLASH Opcode Register */
@@ -121,6 +123,7 @@
 	 (0x054E4 + ((_i - 16) * 8)))
 #define E1000_SHRAL(_i)	(0x05438 + ((_i) * 8))
 #define E1000_SHRAH(_i)	(0x0543C + ((_i) * 8))
+#define E1000_TARC0_CB_MULTIQ_3_REQ	(1 << 28 | 1 << 29)
 #define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
 #define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
 #define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2b65cdcad6ba..5912fdf506a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -140,6 +140,7 @@ enum i40e_state_t {
 	__I40E_CORE_RESET_REQUESTED,
 	__I40E_GLOBAL_RESET_REQUESTED,
 	__I40E_EMP_RESET_REQUESTED,
+	__I40E_EMP_RESET_INTR_RECEIVED,
 	__I40E_FILTER_OVERFLOW_PROMISC,
 	__I40E_SUSPENDED,
 	__I40E_PTP_TX_IN_PROGRESS,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 11a9ffebf8d8..8dbf7dd984ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1297,14 +1297,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
 		*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
 	}
 	/* Update the link info */
-	status = i40e_update_link_info(hw, true);
+	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
 	if (status) {
 		/* Wait a little bit (on 40G cards it sometimes takes a really
 		 * long time for link to come back from the atomic reset)
 		 * and try once more
 		 */
 		msleep(1000);
-		status = i40e_update_link_info(hw, true);
+		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
 	}
 	if (status)
 		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
@@ -1452,35 +1452,6 @@ aq_get_link_info_exit:
 }
 
 /**
- * i40e_update_link_info
- * @hw: pointer to the hw struct
- * @enable_lse: enable/disable LinkStatusEvent reporting
- *
- * Returns the link status of the adapter
- **/
-i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse)
-{
-	struct i40e_aq_get_phy_abilities_resp abilities;
-	i40e_status status;
-
-	status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL);
-	if (status)
-		return status;
-
-	status = i40e_aq_get_phy_capabilities(hw, false, false,
-					      &abilities, NULL);
-	if (status)
-		return status;
-
-	if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED)
-		hw->phy.link_info.an_enabled = true;
-	else
-		hw->phy.link_info.an_enabled = false;
-
-	return status;
-}
-
-/**
  * i40e_aq_set_phy_int_mask
  * @hw: pointer to the hw struct
  * @mask: interrupt mask to be set
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 61236f983971..43a6bf0f356f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1485,11 +1485,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 			} else {
 				dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
 			}
-		} else if (strncmp(&cmd_buf[12], "pf", 2) == 0) {
-			i40e_pf_reset_stats(pf);
-			dev_info(&pf->pdev->dev, "pf clear stats called\n");
+		} else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
+			if (pf->hw.partition_id == 1) {
+				i40e_pf_reset_stats(pf);
+				dev_info(&pf->pdev->dev, "port stats cleared\n");
+			} else {
+				dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
+			}
 		} else {
-			dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
+			dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
 		}
 	} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
 		struct i40e_aq_desc *desc;
@@ -1895,7 +1899,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 	dev_info(&pf->pdev->dev, "  read <reg>\n");
 	dev_info(&pf->pdev->dev, "  write <reg> <value>\n");
 	dev_info(&pf->pdev->dev, "  clear_stats vsi [seid]\n");
-	dev_info(&pf->pdev->dev, "  clear_stats pf\n");
+	dev_info(&pf->pdev->dev, "  clear_stats port\n");
 	dev_info(&pf->pdev->dev, "  pfr\n");
 	dev_info(&pf->pdev->dev, "  corer\n");
 	dev_info(&pf->pdev->dev, "  globr\n");
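The offset 12 in strncmp(&cmd_buf[12], "port", 4) above is strlen("clear_stats "): the sub-token match starts just past the command name and its trailing space. A self-contained sketch of that dispatch shape (standalone C, not the driver code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char cmd_buf[] = "clear_stats port";

	/* first match the command, then the sub-token at offset 12 */
	if (strncmp(cmd_buf, "clear_stats", 11) == 0 &&
	    strncmp(&cmd_buf[12], "port", 4) == 0)
		printf("would clear port stats (allowed on partition 1 only)\n");
	return 0;
}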
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index b8230dc205ec..8e69caf01efe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -113,7 +113,6 @@ static struct i40e_stats i40e_gstrings_stats[] = {
 	I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
 	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
 	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
-	I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
 	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
 	I40E_PF_STAT("crc_errors", stats.crc_errors),
 	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
@@ -621,7 +620,7 @@ static int i40e_set_settings(struct net_device *netdev,
 		return -EAGAIN;
 	}
 
-	status = i40e_update_link_info(hw, true);
+	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
 	if (status)
 		netdev_info(netdev, "Updating link info failed with error %d\n",
 			    status);
@@ -767,7 +766,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
 		err = -EAGAIN;
 	}
 	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
-		netdev_info(netdev, "Set fc failed on the update_link_info call with error %d and status %d\n",
+		netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
 			    status, hw->aq.asq_last_status);
 		err = -EAGAIN;
 	}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 27c206e62da7..8b5bf16d3270 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -381,7 +381,7 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
 	ctxt->pf_num = hw->pf_id;
 	ctxt->vf_num = 0;
 	ctxt->uplink_seid = vsi->uplink_seid;
-	ctxt->connection_type = 0x1;
+	ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
 	ctxt->flags = I40E_AQ_VSI_TYPE_PF;
 
 	/* FCoE VSI would need the following sections */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cbe281be1c9f..d3416a4a8f5a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 6
+#define DRV_VERSION_BUILD 8
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -919,11 +919,6 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
 			   pf->stat_offsets_loaded,
 			   &osd->eth.rx_discards,
 			   &nsd->eth.rx_discards);
-	i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
-			   pf->stat_offsets_loaded,
-			   &osd->eth.tx_discards,
-			   &nsd->eth.tx_discards);
-
 	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
 			   I40E_GLPRT_UPRCL(hw->port),
 			   pf->stat_offsets_loaded,
@@ -2591,7 +2586,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
 	writel(0, ring->tail);
 
-	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+	if (ring_is_ps_enabled(ring)) {
+		i40e_alloc_rx_headers(ring);
+		i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
+	} else {
+		i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
+	}
 
 	return 0;
 }
@@ -3171,7 +3171,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
 			pf->globr_count++;
 		} else if (val == I40E_RESET_EMPR) {
 			pf->empr_count++;
-			set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
 		}
 	}
 
@@ -5037,24 +5037,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
 		i40e_flush(&pf->hw);
 
-	} else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
-
-		/* Request a Firmware Reset
-		 *
-		 * Same as Global reset, plus restarting the
-		 * embedded firmware engine.
-		 */
-		/* enable EMP Reset */
-		val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
-		val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
-		wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
-
-		/* force the reset */
-		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
-		val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
-		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
-		i40e_flush(&pf->hw);
-
 	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
 
 		/* Request a PF Reset
@@ -6197,10 +6179,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 	}
 
 	/* re-verify the eeprom if we just had an EMP reset */
-	if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
-		clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
 		i40e_verify_eeprom(pf);
-	}
 
 	i40e_clear_pxe_mode(hw);
 	ret = i40e_get_capabilities(pf);
@@ -7300,7 +7280,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
 		    I40E_FLAG_MSI_ENABLED     |
 		    I40E_FLAG_MSIX_ENABLED    |
-		    I40E_FLAG_RX_1BUF_ENABLED;
+		    I40E_FLAG_RX_PS_ENABLED;
 
 	/* Set default ITR */
 	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
@@ -7858,7 +7838,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 		ctxt.pf_num = hw->pf_id;
 		ctxt.vf_num = 0;
 		ctxt.uplink_seid = vsi->uplink_seid;
-		ctxt.connection_type = 0x1;     /* regular data port */
+		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 		ctxt.info.valid_sections |=
 			cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
@@ -7871,7 +7851,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 		ctxt.pf_num = hw->pf_id;
 		ctxt.vf_num = 0;
 		ctxt.uplink_seid = vsi->uplink_seid;
-		ctxt.connection_type = 0x1;     /* regular data port */
+		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
 
 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
@@ -7890,7 +7870,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 		ctxt.pf_num = hw->pf_id;
 		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
 		ctxt.uplink_seid = vsi->uplink_seid;
-		ctxt.connection_type = 0x1;     /* regular data port */
+		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
 
 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
@@ -8905,7 +8885,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 		i40e_config_rss(pf);
 
 	/* fill in link information and enable LSE reporting */
-	i40e_update_link_info(&pf->hw, true);
+	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
 	i40e_link_event(pf);
 
 	/* Initialize user-specific link properties */
@@ -8913,7 +8893,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 				  I40E_AQ_AN_COMPLETED) ? true : false);
 
 	/* fill in link information and enable LSE reporting */
-	i40e_update_link_info(&pf->hw, true);
+	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
 	i40e_link_event(pf);
 
 	/* Initialize user-specific link properties */
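The rebuild-path hunk above folds a test_bit()/clear_bit() pair into one test_and_clear_bit() call, an atomic read-modify-write: a bit set by the EMPR interrupt between a separate test and clear can no longer be lost. A minimal sketch of the pattern with a stand-in bit number (the real enum value lives in i40e.h):

#include <linux/bitops.h>
#include <linux/printk.h>

#define EMP_RESET_INTR_RECEIVED 3 /* stand-in for __I40E_EMP_RESET_INTR_RECEIVED */

static void check_emp_reset(unsigned long *state)
{
	/* atomically fetch-and-clear: no window in which a concurrent
	 * setter can be wiped out between the test and the clear
	 */
	if (test_and_clear_bit(EMP_RESET_INTR_RECEIVED, state))
		pr_info("EMP reset interrupt seen; re-verify the eeprom\n");
}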
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 3e70f2e45a47..28429c8fbc98 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -164,15 +164,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
 }
 
 /**
- * i40e_read_nvm_word - Reads Shadow RAM
+ * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
  * @data: word read from the Shadow RAM
  *
  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
  **/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
-			       u16 *data)
+i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+				     u16 *data)
 {
 	i40e_status ret_code = I40E_ERR_TIMEOUT;
 	u32 sr_reg;
@@ -200,6 +200,7 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 			*data = (u16)((sr_reg &
 				       I40E_GLNVM_SRDATA_RDDATA_MASK)
 				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+			*data = le16_to_cpu(*data);
 		}
 	}
 	if (ret_code)
@@ -212,7 +213,21 @@ read_nvm_exit:
 }
 
 /**
- * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * i40e_read_nvm_word - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+			       u16 *data)
+{
+	return i40e_read_nvm_word_srctl(hw, offset, data);
+}
+
+/**
+ * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
  * @words: (in) number of words to read; (out) number of words actually read
@@ -222,8 +237,8 @@ read_nvm_exit:
  * method. The buffer read is preceded by the NVM ownership take
  * and followed by the release.
  **/
-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
-				 u16 *words, u16 *data)
+i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+				       u16 *words, u16 *data)
 {
 	i40e_status ret_code = 0;
 	u16 index, word;
@@ -231,7 +246,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 	/* Loop thru the selected region */
 	for (word = 0; word < *words; word++) {
 		index = offset + word;
-		ret_code = i40e_read_nvm_word(hw, index, &data[word]);
+		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
 		if (ret_code)
 			break;
 	}
@@ -243,6 +258,23 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 }
 
 /**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+				 u16 *words, u16 *data)
+{
+	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
+
+/**
  * i40e_write_nvm_aq - Writes Shadow RAM.
  * @hw: pointer to the HW structure.
  * @module_pointer: module pointer location in words from the NVM beginning
@@ -302,11 +334,18 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
 				u16 *checksum)
 {
 	i40e_status ret_code = 0;
+	struct i40e_virt_mem vmem;
 	u16 pcie_alt_module = 0;
 	u16 checksum_local = 0;
 	u16 vpd_module = 0;
-	u16 word = 0;
-	u32 i = 0;
+	u16 *data;
+	u16 i = 0;
+
+	ret_code = i40e_allocate_virt_mem(hw, &vmem,
+				I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
+	if (ret_code)
+		goto i40e_calc_nvm_checksum_exit;
+	data = (u16 *)vmem.va;
 
 	/* read pointer to VPD area */
 	ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
@@ -317,7 +356,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
 
 	/* read pointer to PCIe Alt Auto-load module */
 	ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
-				      &pcie_alt_module);
+					&pcie_alt_module);
 	if (ret_code) {
 		ret_code = I40E_ERR_NVM_CHECKSUM;
 		goto i40e_calc_nvm_checksum_exit;
@@ -327,33 +366,40 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
 	 * except the VPD and PCIe ALT Auto-load modules
 	 */
 	for (i = 0; i < hw->nvm.sr_size; i++) {
+		/* Read SR page */
+		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+
+			ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
+			if (ret_code) {
+				ret_code = I40E_ERR_NVM_CHECKSUM;
+				goto i40e_calc_nvm_checksum_exit;
+			}
+		}
+
 		/* Skip Checksum word */
 		if (i == I40E_SR_SW_CHECKSUM_WORD)
-			i++;
+			continue;
 		/* Skip VPD module (convert byte size to word count) */
-		if (i == (u32)vpd_module) {
-			i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
-			if (i >= hw->nvm.sr_size)
-				break;
+		if ((i >= (u32)vpd_module) &&
+		    (i < ((u32)vpd_module +
+			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
+			continue;
 		}
 		/* Skip PCIe ALT module (convert byte size to word count) */
-		if (i == (u32)pcie_alt_module) {
-			i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
-			if (i >= hw->nvm.sr_size)
-				break;
+		if ((i >= (u32)pcie_alt_module) &&
+		    (i < ((u32)pcie_alt_module +
+			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
+			continue;
 		}
 
-		ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
-		if (ret_code) {
-			ret_code = I40E_ERR_NVM_CHECKSUM;
-			goto i40e_calc_nvm_checksum_exit;
-		}
-		checksum_local += word;
+		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
 	}
 
 	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
 
 i40e_calc_nvm_checksum_exit:
+	i40e_free_virt_mem(hw, &vmem);
 	return ret_code;
 }
 
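The checksum rework above replaces one register-polled read per Shadow RAM word with one buffered read per 4 KB sector, indexes the cached sector with i % I40E_SR_SECTOR_SIZE_IN_WORDS, and turns the old index-jumping skips into per-word continue statements. A standalone sketch of the same access pattern; the sizes and the in-memory shadow_ram array are stand-ins for the driver's NVM reads:

#include <stdint.h>
#include <string.h>

#define SECTOR_WORDS 2048		/* stand-in for I40E_SR_SECTOR_SIZE_IN_WORDS */
#define SR_SIZE (4 * SECTOR_WORDS)	/* stand-in for hw->nvm.sr_size */

static uint16_t shadow_ram[SR_SIZE];	/* stand-in for the device NVM */

/* one bulk read per sector instead of SR_SIZE single-word reads */
static void read_sector(uint32_t base, uint16_t *buf)
{
	memcpy(buf, &shadow_ram[base], SECTOR_WORDS * sizeof(uint16_t));
}

static int in_range(uint32_t i, uint32_t start, uint32_t nwords)
{
	return i >= start && i < start + nwords;
}

uint16_t calc_checksum(uint32_t csum_word, uint32_t vpd, uint32_t vpd_w,
		       uint32_t alt, uint32_t alt_w)
{
	uint16_t buf[SECTOR_WORDS];
	uint16_t sum = 0;
	uint32_t i;

	for (i = 0; i < SR_SIZE; i++) {
		if ((i % SECTOR_WORDS) == 0)
			read_sector(i, buf);	/* refill cache at sector edge */
		if (i == csum_word || in_range(i, vpd, vpd_w) ||
		    in_range(i, alt, alt_w))
			continue;		/* skip a word, don't jump the index */
		sum += buf[i % SECTOR_WORDS];
	}
	return sum;
}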
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 68e852a96680..1247a45603a8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -97,7 +97,6 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
 				bool enable_lse, struct i40e_link_status *link,
 				struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse);
 i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
 				u64 advt_reg,
 				struct i40e_asq_cmd_details *cmd_details);
@@ -260,8 +259,6 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw);
 i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
 			     enum i40e_aq_resource_access_type access);
 void i40e_release_nvm(struct i40e_hw *hw);
-i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
-			       u16 *data);
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 			       u16 *data);
 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 65d3c8bb2d5b..522d6df51330 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -310,6 +310,10 @@
 #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
 #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
 #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
 #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
 #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
@@ -421,6 +425,8 @@
 #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
 #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
@@ -484,7 +490,9 @@
 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
@@ -548,9 +556,6 @@
 #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
 #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
 #define I40E_GLGEN_RTRIG_CORER_SHIFT 0
 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
@@ -1066,7 +1071,7 @@
 #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
 #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
 #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
 #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
@@ -1171,7 +1176,7 @@
 #define I40E_VFINT_ITRN_MAX_INDEX 2
 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
 #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
@@ -1803,9 +1808,6 @@
 #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
 #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
 #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
 #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
@@ -1902,6 +1904,11 @@
 #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
+#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
+#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
+#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
+#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
 #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
@@ -2374,20 +2381,20 @@
 #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
 #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
 #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
 #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
 #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
 #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_CRCERRS_MAX_INDEX 3
 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
@@ -2620,10 +2627,6 @@
 #define I40E_GLPRT_TDOLD_MAX_INDEX 3
 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDPC_MAX_INDEX 3
-#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
-#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
 #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPRCH_MAX_INDEX 3
 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
@@ -2990,9 +2993,6 @@
 #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
 #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
 #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
@@ -3258,7 +3258,7 @@
 #define I40E_VFINT_ITRN1_MAX_INDEX 2
 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
 #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
 #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2206d2d36f0f..f8c863bfa6f7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -25,6 +25,7 @@
  ******************************************************************************/
 
 #include <linux/prefetch.h>
+#include <net/busy_poll.h>
 #include "i40e.h"
 #include "i40e_prototype.h"
 
@@ -1025,6 +1026,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 	if (!rx_ring->rx_bi)
 		return;
 
+	if (ring_is_ps_enabled(rx_ring)) {
+		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
+
+		rx_bi = &rx_ring->rx_bi[0];
+		if (rx_bi->hdr_buf) {
+			dma_free_coherent(dev,
+					  bufsz,
+					  rx_bi->hdr_buf,
+					  rx_bi->dma);
+			for (i = 0; i < rx_ring->count; i++) {
+				rx_bi = &rx_ring->rx_bi[i];
+				rx_bi->dma = 0;
+				rx_bi->hdr_buf = 0;
+			}
+		}
+	}
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
 		rx_bi = &rx_ring->rx_bi[i];
@@ -1083,6 +1100,37 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 }
 
 /**
+ * i40e_alloc_rx_headers - allocate rx header buffers
+ * @rx_ring: ring to alloc buffers
+ *
+ * Allocate rx header buffers for the entire ring. As these are static,
+ * this is only called when setting up a new ring.
+ **/
+void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	struct i40e_rx_buffer *rx_bi;
+	dma_addr_t dma;
+	void *buffer;
+	int buf_size;
+	int i;
+
+	if (rx_ring->rx_bi[0].hdr_buf)
+		return;
+	/* Make sure the buffers don't cross cache line boundaries. */
+	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
+	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
+				    &dma, GFP_KERNEL);
+	if (!buffer)
+		return;
+	for (i = 0; i < rx_ring->count; i++) {
+		rx_bi = &rx_ring->rx_bi[i];
+		rx_bi->dma = dma + (i * buf_size);
+		rx_bi->hdr_buf = buffer + (i * buf_size);
+	}
+}
+
+/**
  * i40e_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
  *
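i40e_alloc_rx_headers() above makes a single dma_alloc_coherent() call and carves it into per-descriptor slots at a 256-byte stride, so one allocation and one DMA handle serve the whole ring and no header buffer straddles the alignment boundary. The carve arithmetic in isolation (plain C, hypothetical types standing in for the driver's):

#include <stddef.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct hdr_slot {
	uint64_t dma;	/* device address of this slot */
	void *va;	/* CPU address of this slot */
};

/* one block, one DMA handle; slot i lives at a fixed stride offset */
static void carve_headers(void *block_va, uint64_t block_dma,
			  int hdr_len, int count, struct hdr_slot *slots)
{
	size_t stride = ALIGN_UP((size_t)hdr_len, 256);
	int i;

	for (i = 0; i < count; i++) {
		slots[i].dma = block_dma + (uint64_t)(i * stride);
		slots[i].va = (char *)block_va + i * stride;
	}
}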
@@ -1142,11 +1190,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 }
 
 /**
- * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
+ * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
  **/
-void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+	u16 i = rx_ring->next_to_use;
+	union i40e_rx_desc *rx_desc;
+	struct i40e_rx_buffer *bi;
+
+	/* do nothing if no valid netdev defined */
+	if (!rx_ring->netdev || !cleaned_count)
+		return;
+
+	while (cleaned_count--) {
+		rx_desc = I40E_RX_DESC(rx_ring, i);
+		bi = &rx_ring->rx_bi[i];
+
+		if (bi->skb) /* desc is in use */
+			goto no_buffers;
+		if (!bi->page) {
+			bi->page = alloc_page(GFP_ATOMIC);
+			if (!bi->page) {
+				rx_ring->rx_stats.alloc_page_failed++;
+				goto no_buffers;
+			}
+		}
+
+		if (!bi->page_dma) {
+			/* use a half page if we're re-using */
+			bi->page_offset ^= PAGE_SIZE / 2;
+			bi->page_dma = dma_map_page(rx_ring->dev,
+						    bi->page,
+						    bi->page_offset,
+						    PAGE_SIZE / 2,
+						    DMA_FROM_DEVICE);
+			if (dma_mapping_error(rx_ring->dev,
+					      bi->page_dma)) {
+				rx_ring->rx_stats.alloc_page_failed++;
+				bi->page_dma = 0;
+				goto no_buffers;
+			}
+		}
+
+		dma_sync_single_range_for_device(rx_ring->dev,
+						 bi->dma,
+						 0,
+						 rx_ring->rx_hdr_len,
+						 DMA_FROM_DEVICE);
+		/* Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info.
+		 */
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+	}
+
+no_buffers:
+	if (rx_ring->next_to_use != i)
+		i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
 	u16 i = rx_ring->next_to_use;
 	union i40e_rx_desc *rx_desc;
@@ -1186,40 +1299,8 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
 		}
 	}
 
-	if (ring_is_ps_enabled(rx_ring)) {
-		if (!bi->page) {
-			bi->page = alloc_page(GFP_ATOMIC);
-			if (!bi->page) {
-				rx_ring->rx_stats.alloc_page_failed++;
-				goto no_buffers;
-			}
-		}
-
-		if (!bi->page_dma) {
-			/* use a half page if we're re-using */
-			bi->page_offset ^= PAGE_SIZE / 2;
-			bi->page_dma = dma_map_page(rx_ring->dev,
-						    bi->page,
-						    bi->page_offset,
-						    PAGE_SIZE / 2,
-						    DMA_FROM_DEVICE);
-			if (dma_mapping_error(rx_ring->dev,
-					      bi->page_dma)) {
-				rx_ring->rx_stats.alloc_page_failed++;
-				bi->page_dma = 0;
-				goto no_buffers;
-			}
-		}
-
-		/* Refresh the desc even if buffer_addrs didn't change
-		 * because each write-back erases this info.
-		 */
-		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
-		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-	} else {
-		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-		rx_desc->read.hdr_addr = 0;
-	}
+	rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+	rx_desc->read.hdr_addr = 0;
 	i++;
 	if (i == rx_ring->count)
 		i = 0;
@@ -1404,13 +1485,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
 }
 
 /**
- * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
  * @rx_ring: rx ring to clean
  * @budget: how many cleans we're allowed
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
-static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 {
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
@@ -1426,25 +1507,51 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1426 if (budget <= 0) 1507 if (budget <= 0)
1427 return 0; 1508 return 0;
1428 1509
1429 rx_desc = I40E_RX_DESC(rx_ring, i); 1510 do {
1430 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1431 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1432 I40E_RXD_QW1_STATUS_SHIFT;
1433
1434 while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
1435 union i40e_rx_desc *next_rxd;
1436 struct i40e_rx_buffer *rx_bi; 1511 struct i40e_rx_buffer *rx_bi;
1437 struct sk_buff *skb; 1512 struct sk_buff *skb;
1438 u16 vlan_tag; 1513 u16 vlan_tag;
1514 /* return some buffers to hardware, one at a time is too slow */
1515 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1516 i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
1517 cleaned_count = 0;
1518 }
1519
1520 i = rx_ring->next_to_clean;
1521 rx_desc = I40E_RX_DESC(rx_ring, i);
1522 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1523 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1524 I40E_RXD_QW1_STATUS_SHIFT;
1525
1526 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
1527 break;
1528
1529 /* This memory barrier is needed to keep us from reading
1530 * any other fields out of the rx_desc until we know the
1531 * DD bit is set.
1532 */
1533 rmb();
1439 if (i40e_rx_is_programming_status(qword)) { 1534 if (i40e_rx_is_programming_status(qword)) {
1440 i40e_clean_programming_status(rx_ring, rx_desc); 1535 i40e_clean_programming_status(rx_ring, rx_desc);
1441 I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); 1536 I40E_RX_INCREMENT(rx_ring, i);
1442 goto next_desc; 1537 continue;
1443 } 1538 }
1444 rx_bi = &rx_ring->rx_bi[i]; 1539 rx_bi = &rx_ring->rx_bi[i];
1445 skb = rx_bi->skb; 1540 skb = rx_bi->skb;
1446 prefetch(skb->data); 1541 if (likely(!skb)) {
1447 1542 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1543 rx_ring->rx_hdr_len);
1544 if (!skb) {
1545 rx_ring->rx_stats.alloc_buff_failed++; break; }
1546 /* initialize queue mapping */
1547 skb_record_rx_queue(skb, rx_ring->queue_index);
1548 /* we are reusing so sync this buffer for CPU use */
1549 dma_sync_single_range_for_cpu(rx_ring->dev,
1550 rx_bi->dma,
1551 0,
1552 rx_ring->rx_hdr_len,
1553 DMA_FROM_DEVICE);
1554 }
1448 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> 1555 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1449 I40E_RXD_QW1_LENGTH_PBUF_SHIFT; 1556 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1450 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> 1557 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
@@ -1459,40 +1566,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1459 1566
1460 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> 1567 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1461 I40E_RXD_QW1_PTYPE_SHIFT; 1568 I40E_RXD_QW1_PTYPE_SHIFT;
1569 prefetch(rx_bi->page);
1462 rx_bi->skb = NULL; 1570 rx_bi->skb = NULL;
1463 1571 cleaned_count++;
1464 /* This memory barrier is needed to keep us from reading 1572 if (rx_hbo || rx_sph) {
1465 * any other fields out of the rx_desc until we know the 1573 int len;
1466 * STATUS_DD bit is set
1467 */
1468 rmb();
1469
1470 /* Get the header and possibly the whole packet
1471 * If this is an skb from previous receive dma will be 0
1472 */
1473 if (rx_bi->dma) {
1474 u16 len;
1475
1476 if (rx_hbo) 1574 if (rx_hbo)
1477 len = I40E_RX_HDR_SIZE; 1575 len = I40E_RX_HDR_SIZE;
1478 else if (rx_sph)
1479 len = rx_header_len;
1480 else if (rx_packet_len)
1481 len = rx_packet_len; /* 1buf/no split found */
1482 else 1576 else
1483 len = rx_header_len; /* split always mode */ 1577 len = rx_header_len;
1484 1578 memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
1485 skb_put(skb, len); 1579 } else if (skb->len == 0) {
1486 dma_unmap_single(rx_ring->dev, 1580 int len;
1487 rx_bi->dma, 1581
1488 rx_ring->rx_buf_len, 1582 len = (rx_packet_len > skb_headlen(skb) ?
1489 DMA_FROM_DEVICE); 1583 skb_headlen(skb) : rx_packet_len);
1490 rx_bi->dma = 0; 1584 memcpy(__skb_put(skb, len),
1585 rx_bi->page + rx_bi->page_offset,
1586 len);
1587 rx_bi->page_offset += len;
1588 rx_packet_len -= len;
1491 } 1589 }
1492 1590
1493 /* Get the rest of the data if this was a header split */ 1591 /* Get the rest of the data if this was a header split */
1494 if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { 1592 if (rx_packet_len) {
1495
1496 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 1593 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1497 rx_bi->page, 1594 rx_bi->page,
1498 rx_bi->page_offset, 1595 rx_bi->page_offset,
@@ -1514,22 +1611,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1514 DMA_FROM_DEVICE); 1611 DMA_FROM_DEVICE);
1515 rx_bi->page_dma = 0; 1612 rx_bi->page_dma = 0;
1516 } 1613 }
1517 I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); 1614 I40E_RX_INCREMENT(rx_ring, i);
1518 1615
1519 if (unlikely( 1616 if (unlikely(
1520 !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { 1617 !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1521 struct i40e_rx_buffer *next_buffer; 1618 struct i40e_rx_buffer *next_buffer;
1522 1619
1523 next_buffer = &rx_ring->rx_bi[i]; 1620 next_buffer = &rx_ring->rx_bi[i];
1524 1621 next_buffer->skb = skb;
1525 if (ring_is_ps_enabled(rx_ring)) {
1526 rx_bi->skb = next_buffer->skb;
1527 rx_bi->dma = next_buffer->dma;
1528 next_buffer->skb = skb;
1529 next_buffer->dma = 0;
1530 }
1531 rx_ring->rx_stats.non_eop_descs++; 1622 rx_ring->rx_stats.non_eop_descs++;
1532 goto next_desc; 1623 continue;
1533 } 1624 }
1534 1625
1535 /* ERR_MASK will only have valid bits if EOP set */ 1626 /* ERR_MASK will only have valid bits if EOP set */
@@ -1538,7 +1629,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1538 /* TODO: shouldn't we increment a counter indicating the 1629 /* TODO: shouldn't we increment a counter indicating the
1539 * drop? 1630 * drop?
1540 */ 1631 */
1541 goto next_desc; 1632 continue;
1542 } 1633 }
1543 1634
1544 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), 1635 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
@@ -1564,33 +1655,149 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1564#ifdef I40E_FCOE 1655#ifdef I40E_FCOE
1565 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { 1656 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1566 dev_kfree_skb_any(skb); 1657 dev_kfree_skb_any(skb);
1567 goto next_desc; 1658 continue;
1568 } 1659 }
1569#endif 1660#endif
1661 skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
1570 i40e_receive_skb(rx_ring, skb, vlan_tag); 1662 i40e_receive_skb(rx_ring, skb, vlan_tag);
1571 1663
1572 rx_ring->netdev->last_rx = jiffies; 1664 rx_ring->netdev->last_rx = jiffies;
1573 budget--;
1574next_desc:
1575 rx_desc->wb.qword1.status_error_len = 0; 1665 rx_desc->wb.qword1.status_error_len = 0;
1576 if (!budget)
1577 break;
1578 1666
1579 cleaned_count++; 1667 } while (likely(total_rx_packets < budget));
1668
1669 u64_stats_update_begin(&rx_ring->syncp);
1670 rx_ring->stats.packets += total_rx_packets;
1671 rx_ring->stats.bytes += total_rx_bytes;
1672 u64_stats_update_end(&rx_ring->syncp);
1673 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1674 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1675
1676 return total_rx_packets;
1677}
1678
1679/**
1680 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
1681 * @rx_ring: rx ring to clean
1682 * @budget: how many cleans we're allowed
1683 *
1684 * Returns number of packets cleaned
1685 **/
1686static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
1687{
1688 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1689 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1690 struct i40e_vsi *vsi = rx_ring->vsi;
1691 union i40e_rx_desc *rx_desc;
1692 u32 rx_error, rx_status;
1693 u16 rx_packet_len;
1694 u8 rx_ptype;
1695 u64 qword;
1696 u16 i;
1697
1698 do {
1699 struct i40e_rx_buffer *rx_bi;
1700 struct sk_buff *skb;
1701 u16 vlan_tag;
1580 /* return some buffers to hardware, one at a time is too slow */ 1702 /* return some buffers to hardware, one at a time is too slow */
1581 if (cleaned_count >= I40E_RX_BUFFER_WRITE) { 1703 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1582 i40e_alloc_rx_buffers(rx_ring, cleaned_count); 1704 i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
1583 cleaned_count = 0; 1705 cleaned_count = 0;
1584 } 1706 }
1585 1707
1586 /* use prefetched values */ 1708 i = rx_ring->next_to_clean;
1587 rx_desc = next_rxd; 1709 rx_desc = I40E_RX_DESC(rx_ring, i);
1588 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 1710 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1589 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> 1711 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1590 I40E_RXD_QW1_STATUS_SHIFT; 1712 I40E_RXD_QW1_STATUS_SHIFT;
1591 } 1713
1714 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
1715 break;
1716
1717 /* This memory barrier is needed to keep us from reading
1718 * any other fields out of the rx_desc until we know the
1719 * DD bit is set.
1720 */
1721 rmb();
1722
1723 if (i40e_rx_is_programming_status(qword)) {
1724 i40e_clean_programming_status(rx_ring, rx_desc);
1725 I40E_RX_INCREMENT(rx_ring, i);
1726 continue;
1727 }
1728 rx_bi = &rx_ring->rx_bi[i];
1729 skb = rx_bi->skb;
1730 prefetch(skb->data);
1731
1732 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1733 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1734
1735 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1736 I40E_RXD_QW1_ERROR_SHIFT;
1737 rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
1738
1739 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1740 I40E_RXD_QW1_PTYPE_SHIFT;
1741 rx_bi->skb = NULL;
1742 cleaned_count++;
1743
1744 /* Pull the whole packet into the skb posted for this
1745 * descriptor; the dma mapping is always valid in this path.
1746 */
1747 skb_put(skb, rx_packet_len);
1748 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
1749 DMA_FROM_DEVICE);
1750 rx_bi->dma = 0;
1751
1752 I40E_RX_INCREMENT(rx_ring, i);
1753
1754 if (unlikely(
1755 !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1756 rx_ring->rx_stats.non_eop_descs++;
1757 continue;
1758 }
1759
1760 /* ERR_MASK will only have valid bits if EOP set */
1761 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1762 dev_kfree_skb_any(skb);
1763 /* TODO: shouldn't we increment a counter indicating the
1764 * drop?
1765 */
1766 continue;
1767 }
1768
1769 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1770 i40e_ptype_to_hash(rx_ptype));
1771 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1772 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1773 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1774 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1775 rx_ring->last_rx_timestamp = jiffies;
1776 }
1777
1778 /* probably a little skewed due to removing CRC */
1779 total_rx_bytes += skb->len;
1780 total_rx_packets++;
1781
1782 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1783
1784 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1785
1786 vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1787 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1788 : 0;
1789#ifdef I40E_FCOE
1790 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1791 dev_kfree_skb_any(skb);
1792 continue;
1793 }
1794#endif
1795 i40e_receive_skb(rx_ring, skb, vlan_tag);
1796
1797 rx_ring->netdev->last_rx = jiffies;
1798 rx_desc->wb.qword1.status_error_len = 0;
1799 } while (likely(total_rx_packets < budget));
1592 1800
1593 rx_ring->next_to_clean = i;
1594 u64_stats_update_begin(&rx_ring->syncp); 1801 u64_stats_update_begin(&rx_ring->syncp);
1595 rx_ring->stats.packets += total_rx_packets; 1802 rx_ring->stats.packets += total_rx_packets;
1596 rx_ring->stats.bytes += total_rx_bytes; 1803 rx_ring->stats.bytes += total_rx_bytes;
@@ -1598,10 +1805,7 @@ next_desc:
1598 rx_ring->q_vector->rx.total_packets += total_rx_packets; 1805 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1599 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; 1806 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1600 1807
1601 if (cleaned_count) 1808 return total_rx_packets;
1602 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
1603
1604 return budget > 0;
1605} 1809}
1606 1810
1607/** 1811/**
@@ -1622,6 +1826,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
1622 bool clean_complete = true; 1826 bool clean_complete = true;
1623 bool arm_wb = false; 1827 bool arm_wb = false;
1624 int budget_per_ring; 1828 int budget_per_ring;
1829 int cleaned;
1625 1830
1626 if (test_bit(__I40E_DOWN, &vsi->state)) { 1831 if (test_bit(__I40E_DOWN, &vsi->state)) {
1627 napi_complete(napi); 1832 napi_complete(napi);
@@ -1641,8 +1846,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
1641 */ 1846 */
1642 budget_per_ring = max(budget/q_vector->num_ringpairs, 1); 1847 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1643 1848
1644 i40e_for_each_ring(ring, q_vector->rx) 1849 i40e_for_each_ring(ring, q_vector->rx) {
1645 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); 1850 if (ring_is_ps_enabled(ring))
1851 cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
1852 else
1853 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
1854 /* if we didn't clean as many as budgeted, we must be done */
1855 clean_complete &= (budget_per_ring != cleaned);
1856 }
1646 1857
1647 /* If work not completed, return budget and polling will return */ 1858 /* If work not completed, return budget and polling will return */
1648 if (!clean_complete) { 1859 if (!clean_complete) {
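A note on the poll dispatch above: completion is now inferred from per-ring consumption alone, since the clean routines return a packet count instead of a boolean. A minimal, self-contained model of that accounting follows; poll_ring, poll_complete, and pending[] are illustrative names, not driver code.

#include <stdbool.h>

/* toy stand-in for one ring's clean routine: consume up to 'budget' */
static int poll_ring(int pending, int budget)
{
	return pending < budget ? pending : budget;
}

/* mirror of the clean_complete accounting in the hunk above */
static bool poll_complete(const int *pending, int nrings, int budget)
{
	bool complete = true;

	for (int i = 0; i < nrings; i++) {
		int cleaned = poll_ring(pending[i], budget);

		/* a ring that spent its full budget may still have work */
		complete &= (cleaned != budget);
	}
	return complete;
}

Treating an exactly-exhausted budget as incomplete costs at most one extra poll pass, but never leaves ready descriptors stranded with the interrupt still masked.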
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 18b00231d2f1..38449b230d60 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -96,6 +96,14 @@ enum i40e_dyn_idx_t {
96 96
97/* How many Rx Buffers do we bundle into one write to the hardware ? */ 97/* How many Rx Buffers do we bundle into one write to the hardware ? */
98#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 98#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
99#define I40E_RX_INCREMENT(r, i) \
100 do { \
101 (i)++; \
102 if ((i) == (r)->count) \
103 i = 0; \
104 r->next_to_clean = i; \
105 } while (0)
106
99#define I40E_RX_NEXT_DESC(r, i, n) \ 107#define I40E_RX_NEXT_DESC(r, i, n) \
100 do { \ 108 do { \
101 (i)++; \ 109 (i)++; \
@@ -151,6 +159,7 @@ struct i40e_tx_buffer {
151 159
152struct i40e_rx_buffer { 160struct i40e_rx_buffer {
153 struct sk_buff *skb; 161 struct sk_buff *skb;
162 void *hdr_buf;
154 dma_addr_t dma; 163 dma_addr_t dma;
155 struct page *page; 164 struct page *page;
156 dma_addr_t page_dma; 165 dma_addr_t page_dma;
@@ -223,8 +232,8 @@ struct i40e_ring {
223 u16 rx_buf_len; 232 u16 rx_buf_len;
224 u8 dtype; 233 u8 dtype;
225#define I40E_RX_DTYPE_NO_SPLIT 0 234#define I40E_RX_DTYPE_NO_SPLIT 0
226#define I40E_RX_DTYPE_SPLIT_ALWAYS 1 235#define I40E_RX_DTYPE_HEADER_SPLIT 1
227#define I40E_RX_DTYPE_HEADER_SPLIT 2 236#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
228 u8 hsplit; 237 u8 hsplit;
229#define I40E_RX_SPLIT_L2 0x1 238#define I40E_RX_SPLIT_L2 0x1
230#define I40E_RX_SPLIT_IP 0x2 239#define I40E_RX_SPLIT_IP 0x2
@@ -280,7 +289,9 @@ struct i40e_ring_container {
280#define i40e_for_each_ring(pos, head) \ 289#define i40e_for_each_ring(pos, head) \
281 for (pos = (head).ring; pos != NULL; pos = pos->next) 290 for (pos = (head).ring; pos != NULL; pos = pos->next)
282 291
283void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); 292void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
293void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
294void i40e_alloc_rx_headers(struct i40e_ring *rxr);
284netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 295netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
285void i40e_clean_tx_ring(struct i40e_ring *tx_ring); 296void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
286void i40e_clean_rx_ring(struct i40e_ring *rx_ring); 297void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
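I40E_RX_INCREMENT, added in the header above, replaces the old prefetch-based descriptor walk: it advances the index with wraparound and publishes next_to_clean after every descriptor. A standalone sketch of the same semantics; toy_ring is an illustrative name.

struct toy_ring {
	unsigned int count;		/* number of descriptors in the ring */
	unsigned int next_to_clean;	/* consumer index published to the ring */
};

static void toy_rx_increment(struct toy_ring *r, unsigned int *i)
{
	(*i)++;
	if (*i == r->count)	/* wrap at the end of the descriptor ring */
		*i = 0;
	r->next_to_clean = *i;	/* record progress after every descriptor */
}

Publishing next_to_clean each iteration is what lets the programming-status and non-EOP cases in the clean loops simply continue; the old next_desc label and prefetched next descriptor are no longer needed.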
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index e9901ef06a63..86a927b88ef4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -175,7 +175,6 @@ struct i40e_link_status {
175 u8 an_info; 175 u8 an_info;
176 u8 ext_info; 176 u8 ext_info;
177 u8 loopback; 177 u8 loopback;
178 bool an_enabled;
179 /* is Link Status Event notification to SW enabled */ 178 /* is Link Status Event notification to SW enabled */
180 bool lse_enable; 179 bool lse_enable;
181 u16 max_frame_size; 180 u16 max_frame_size;
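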
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 61dd1b187624..2d20af290fbf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -59,31 +59,29 @@
59 * of the virtchnl_msg structure. 59 * of the virtchnl_msg structure.
60 */ 60 */
61enum i40e_virtchnl_ops { 61enum i40e_virtchnl_ops {
62/* VF sends req. to pf for the following 62/* The PF sends status change events to VFs using
63 * ops. 63 * the I40E_VIRTCHNL_OP_EVENT opcode.
64 * VFs send requests to the PF using the other ops.
64 */ 65 */
65 I40E_VIRTCHNL_OP_UNKNOWN = 0, 66 I40E_VIRTCHNL_OP_UNKNOWN = 0,
66 I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ 67 I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
67 I40E_VIRTCHNL_OP_RESET_VF, 68 I40E_VIRTCHNL_OP_RESET_VF = 2,
68 I40E_VIRTCHNL_OP_GET_VF_RESOURCES, 69 I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
69 I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, 70 I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
70 I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, 71 I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
71 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, 72 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
72 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, 73 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
73 I40E_VIRTCHNL_OP_ENABLE_QUEUES, 74 I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
74 I40E_VIRTCHNL_OP_DISABLE_QUEUES, 75 I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
75 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, 76 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
76 I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, 77 I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
77 I40E_VIRTCHNL_OP_ADD_VLAN, 78 I40E_VIRTCHNL_OP_ADD_VLAN = 12,
78 I40E_VIRTCHNL_OP_DEL_VLAN, 79 I40E_VIRTCHNL_OP_DEL_VLAN = 13,
79 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 80 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
80 I40E_VIRTCHNL_OP_GET_STATS, 81 I40E_VIRTCHNL_OP_GET_STATS = 15,
81 I40E_VIRTCHNL_OP_FCOE, 82 I40E_VIRTCHNL_OP_FCOE = 16,
82 I40E_VIRTCHNL_OP_CONFIG_RSS, 83 I40E_VIRTCHNL_OP_EVENT = 17,
83/* PF sends status change events to vfs using 84 I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
84 * the following op.
85 */
86 I40E_VIRTCHNL_OP_EVENT,
87}; 85};
88 86
89/* Virtual channel message descriptor. This overlays the admin queue 87/* Virtual channel message descriptor. This overlays the admin queue
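The enum above now spells out every opcode value. Because PF and VF drivers are built and updated independently, these numbers are a wire contract; explicit initializers keep future insertions or reorderings from silently shifting them. A hedged sketch of the idea, with placeholder names (demo_op, demo_validate):

/* Explicit opcode values fix each operation's wire number; adding or
 * reordering enum entries can no longer silently renumber the protocol
 * that independently built peers agree on.
 */
enum demo_op {
	DEMO_OP_UNKNOWN = 0,
	DEMO_OP_VERSION = 1,	/* must always be 1 */
	DEMO_OP_EVENT = 17,
	DEMO_OP_CONFIG_RSS = 18,
};

/* receive-side check: reject opcodes this build does not know */
static int demo_validate(unsigned int op)
{
	switch (op) {
	case DEMO_OP_VERSION:
	case DEMO_OP_EVENT:
	case DEMO_OP_CONFIG_RSS:
		return 0;
	default:
		return -1;
	}
}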
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 40f042af4131..5450b9f1aa3a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2427,7 +2427,8 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
2427 ctxt.pf_num = pf->hw.pf_id; 2427 ctxt.pf_num = pf->hw.pf_id;
2428 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 2428 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
2429 if (enable) 2429 if (enable)
2430 ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; 2430 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
2431 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
2431 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); 2432 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2432 if (ret) { 2433 if (ret) {
2433 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", 2434 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
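With the two-flag update above, one spoof-check toggle now arms both MAC and VLAN anti-spoof filtering in the VSI security section. A minimal sketch of the flag arithmetic, using placeholder SEC_FLAG_* values rather than the hardware definitions:

#include <stdbool.h>

#define SEC_FLAG_ENABLE_VLAN_CHK	0x1	/* placeholder value */
#define SEC_FLAG_ENABLE_MAC_CHK		0x2	/* placeholder value */

/* MAC and VLAN spoof checking are toggled as a pair */
static unsigned char demo_spoofchk_flags(unsigned char sec_flags, bool enable)
{
	if (enable)
		sec_flags |= SEC_FLAG_ENABLE_VLAN_CHK |
			     SEC_FLAG_ENABLE_MAC_CHK;
	return sec_flags;
}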
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index c1f6a59bfea0..3cc737629bf7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -310,6 +310,10 @@
310#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) 310#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
311#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 311#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
312#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) 312#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
313#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
314#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
315#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
316#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
313#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ 317#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
314#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 318#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
315#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) 319#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
@@ -421,6 +425,8 @@
421#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) 425#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
422#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 426#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
423#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) 427#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
428#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
429#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
424#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ 430#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
425#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 431#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
426#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) 432#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
@@ -484,7 +490,9 @@
484#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 490#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
485#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) 491#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
486#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 492#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
487#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) 493#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
494#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
495#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
488#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ 496#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
489#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 497#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
490#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 498#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
@@ -548,9 +556,6 @@
548#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) 556#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
549#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 557#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
550#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) 558#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
551#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
552#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
553#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
554#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ 559#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
555#define I40E_GLGEN_RTRIG_CORER_SHIFT 0 560#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
556#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) 561#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
@@ -1066,7 +1071,7 @@
1066#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) 1071#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
1067#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 1072#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
1068#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) 1073#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
1069#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */ 1074#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
1070#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 1075#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
1071#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) 1076#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
1072#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ 1077#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
@@ -1171,7 +1176,7 @@
1171#define I40E_VFINT_ITRN_MAX_INDEX 2 1176#define I40E_VFINT_ITRN_MAX_INDEX 2
1172#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 1177#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
1173#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) 1178#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
1174#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ 1179#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
1175#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 1180#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
1176#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 1181#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
1177#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) 1182#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
@@ -1803,9 +1808,6 @@
1803#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 1808#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
1804#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 1809#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
1805#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) 1810#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
1806#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
1807#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
1808#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
1809#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ 1811#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
1810#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 1812#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
1811#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) 1813#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
@@ -1902,6 +1904,11 @@
1902#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) 1904#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
1903#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 1905#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
1904#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) 1906#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
1907#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
1908#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
1909#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
1910#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
1911#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
1905#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ 1912#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
1906#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 1913#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
1907#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) 1914#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
@@ -2374,20 +2381,20 @@
2374#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) 2381#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
2375#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ 2382#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
2376#define I40E_GLPRT_BPRCH_MAX_INDEX 3 2383#define I40E_GLPRT_BPRCH_MAX_INDEX 3
2377#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0 2384#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
2378#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT) 2385#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
2379#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ 2386#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
2380#define I40E_GLPRT_BPRCL_MAX_INDEX 3 2387#define I40E_GLPRT_BPRCL_MAX_INDEX 3
2381#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0 2388#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
2382#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT) 2389#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
2383#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ 2390#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
2384#define I40E_GLPRT_BPTCH_MAX_INDEX 3 2391#define I40E_GLPRT_BPTCH_MAX_INDEX 3
2385#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0 2392#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
2386#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT) 2393#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
2387#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ 2394#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
2388#define I40E_GLPRT_BPTCL_MAX_INDEX 3 2395#define I40E_GLPRT_BPTCL_MAX_INDEX 3
2389#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0 2396#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
2390#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT) 2397#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
2391#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ 2398#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
2392#define I40E_GLPRT_CRCERRS_MAX_INDEX 3 2399#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
2393#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 2400#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
@@ -2620,10 +2627,6 @@
2620#define I40E_GLPRT_TDOLD_MAX_INDEX 3 2627#define I40E_GLPRT_TDOLD_MAX_INDEX 3
2621#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 2628#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
2622#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) 2629#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
2623#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
2624#define I40E_GLPRT_TDPC_MAX_INDEX 3
2625#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
2626#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
2627#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ 2630#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
2628#define I40E_GLPRT_UPRCH_MAX_INDEX 3 2631#define I40E_GLPRT_UPRCH_MAX_INDEX 3
2629#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 2632#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
@@ -2990,9 +2993,6 @@
2990#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ 2993#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
2991#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 2994#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
2992#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) 2995#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
2993#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
2994#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
2995#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
2996#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ 2996#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
2997#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 2997#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
2998#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) 2998#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
@@ -3258,7 +3258,7 @@
3258#define I40E_VFINT_ITRN1_MAX_INDEX 2 3258#define I40E_VFINT_ITRN1_MAX_INDEX 2
3259#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 3259#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
3260#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) 3260#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
3261#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */ 3261#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
3262#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 3262#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
3263#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) 3263#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
3264#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ 3264#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
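The register additions and renames above all follow the header's single pattern: a field is described by a _SHIFT plus a _MASK built with I40E_MASK(value, shift). A generic, illustrative sketch of extracting such a field from a 32-bit register value; the DEMO_* names and the sample value are placeholders.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MASK(mask, shift)	((uint32_t)(mask) << (shift))

/* field layout mimicking the *_SHIFT/*_MASK pairs in the header */
#define DEMO_RXQNUM_SHIFT	0
#define DEMO_RXQNUM_MASK	DEMO_MASK(0x3FFF, DEMO_RXQNUM_SHIFT)

static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;	/* isolate, then right-justify */
}

int main(void)
{
	uint32_t reg = 0x00012345;	/* pretend register read */

	printf("rxqnum = %u\n", get_field(reg, DEMO_RXQNUM_MASK,
					  DEMO_RXQNUM_SHIFT));
	return 0;
}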
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 29004382f462..fc7e2d0b755c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -25,6 +25,7 @@
25 ******************************************************************************/ 25 ******************************************************************************/
26 26
27#include <linux/prefetch.h> 27#include <linux/prefetch.h>
28#include <net/busy_poll.h>
28 29
29#include "i40evf.h" 30#include "i40evf.h"
30#include "i40e_prototype.h" 31#include "i40e_prototype.h"
@@ -523,6 +524,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
523 if (!rx_ring->rx_bi) 524 if (!rx_ring->rx_bi)
524 return; 525 return;
525 526
527 if (ring_is_ps_enabled(rx_ring)) {
528 int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
529
530 rx_bi = &rx_ring->rx_bi[0];
531 if (rx_bi->hdr_buf) {
532 dma_free_coherent(dev,
533 bufsz,
534 rx_bi->hdr_buf,
535 rx_bi->dma);
536 for (i = 0; i < rx_ring->count; i++) {
537 rx_bi = &rx_ring->rx_bi[i];
538 rx_bi->dma = 0;
539 rx_bi->hdr_buf = NULL;
540 }
541 }
542 }
526 /* Free all the Rx ring sk_buffs */ 543 /* Free all the Rx ring sk_buffs */
527 for (i = 0; i < rx_ring->count; i++) { 544 for (i = 0; i < rx_ring->count; i++) {
528 rx_bi = &rx_ring->rx_bi[i]; 545 rx_bi = &rx_ring->rx_bi[i];
@@ -581,6 +598,37 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
581} 598}
582 599
583/** 600/**
601 * i40evf_alloc_rx_headers - allocate rx header buffers
602 * @rx_ring: ring to alloc buffers
603 *
604 * Allocate rx header buffers for the entire ring. As these are static,
605 * this is only called when setting up a new ring.
606 **/
607void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring)
608{
609 struct device *dev = rx_ring->dev;
610 struct i40e_rx_buffer *rx_bi;
611 dma_addr_t dma;
612 void *buffer;
613 int buf_size;
614 int i;
615
616 if (rx_ring->rx_bi[0].hdr_buf)
617 return;
618 /* Make sure the buffers don't cross cache line boundaries. */
619 buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
620 buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
621 &dma, GFP_KERNEL);
622 if (!buffer)
623 return;
624 for (i = 0; i < rx_ring->count; i++) {
625 rx_bi = &rx_ring->rx_bi[i];
626 rx_bi->dma = dma + (i * buf_size);
627 rx_bi->hdr_buf = buffer + (i * buf_size);
628 }
629}
630
631/**
584 * i40evf_setup_rx_descriptors - Allocate Rx descriptors 632 * i40evf_setup_rx_descriptors - Allocate Rx descriptors
585 * @rx_ring: Rx descriptor ring (for a specific queue) to setup 633 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
586 * 634 *
@@ -640,11 +688,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
640} 688}
641 689
642/** 690/**
643 * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split 691 * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
644 * @rx_ring: ring to place buffers on 692 * @rx_ring: ring to place buffers on
645 * @cleaned_count: number of buffers to replace 693 * @cleaned_count: number of buffers to replace
646 **/ 694 **/
647void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) 695void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
696{
697 u16 i = rx_ring->next_to_use;
698 union i40e_rx_desc *rx_desc;
699 struct i40e_rx_buffer *bi;
700
701 /* do nothing if no valid netdev defined */
702 if (!rx_ring->netdev || !cleaned_count)
703 return;
704
705 while (cleaned_count--) {
706 rx_desc = I40E_RX_DESC(rx_ring, i);
707 bi = &rx_ring->rx_bi[i];
708
709 if (bi->skb) /* desc is in use */
710 goto no_buffers;
711 if (!bi->page) {
712 bi->page = alloc_page(GFP_ATOMIC);
713 if (!bi->page) {
714 rx_ring->rx_stats.alloc_page_failed++;
715 goto no_buffers;
716 }
717 }
718
719 if (!bi->page_dma) {
720 /* use a half page if we're re-using */
721 bi->page_offset ^= PAGE_SIZE / 2;
722 bi->page_dma = dma_map_page(rx_ring->dev,
723 bi->page,
724 bi->page_offset,
725 PAGE_SIZE / 2,
726 DMA_FROM_DEVICE);
727 if (dma_mapping_error(rx_ring->dev,
728 bi->page_dma)) {
729 rx_ring->rx_stats.alloc_page_failed++;
730 bi->page_dma = 0;
731 goto no_buffers;
732 }
733 }
734
735 dma_sync_single_range_for_device(rx_ring->dev,
736 bi->dma,
737 0,
738 rx_ring->rx_hdr_len,
739 DMA_FROM_DEVICE);
740 /* Refresh the desc even if buffer_addrs didn't change
741 * because each write-back erases this info.
742 */
743 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
744 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
745 i++;
746 if (i == rx_ring->count)
747 i = 0;
748 }
749
750no_buffers:
751 if (rx_ring->next_to_use != i)
752 i40e_release_rx_desc(rx_ring, i);
753}
754
755/**
756 * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
757 * @rx_ring: ring to place buffers on
758 * @cleaned_count: number of buffers to replace
759 **/
760void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
648{ 761{
649 u16 i = rx_ring->next_to_use; 762 u16 i = rx_ring->next_to_use;
650 union i40e_rx_desc *rx_desc; 763 union i40e_rx_desc *rx_desc;
@@ -684,40 +797,8 @@ void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
684 } 797 }
685 } 798 }
686 799
687 if (ring_is_ps_enabled(rx_ring)) { 800 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
688 if (!bi->page) { 801 rx_desc->read.hdr_addr = 0;
689 bi->page = alloc_page(GFP_ATOMIC);
690 if (!bi->page) {
691 rx_ring->rx_stats.alloc_page_failed++;
692 goto no_buffers;
693 }
694 }
695
696 if (!bi->page_dma) {
697 /* use a half page if we're re-using */
698 bi->page_offset ^= PAGE_SIZE / 2;
699 bi->page_dma = dma_map_page(rx_ring->dev,
700 bi->page,
701 bi->page_offset,
702 PAGE_SIZE / 2,
703 DMA_FROM_DEVICE);
704 if (dma_mapping_error(rx_ring->dev,
705 bi->page_dma)) {
706 rx_ring->rx_stats.alloc_page_failed++;
707 bi->page_dma = 0;
708 goto no_buffers;
709 }
710 }
711
712 /* Refresh the desc even if buffer_addrs didn't change
713 * because each write-back erases this info.
714 */
715 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
716 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
717 } else {
718 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
719 rx_desc->read.hdr_addr = 0;
720 }
721 i++; 802 i++;
722 if (i == rx_ring->count) 803 if (i == rx_ring->count)
723 i = 0; 804 i = 0;
@@ -900,13 +981,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
900} 981}
901 982
902/** 983/**
903 * i40e_clean_rx_irq - Reclaim resources after receive completes 984 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
904 * @rx_ring: rx ring to clean 985 * @rx_ring: rx ring to clean
905 * @budget: how many cleans we're allowed 986 * @budget: how many cleans we're allowed
906 * 987 *
907 * Returns true if there's any budget left (e.g. the clean is finished) 988 * Returns number of packets cleaned
908 **/ 989 **/
909static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) 990static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
910{ 991{
911 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 992 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
912 u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; 993 u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
@@ -919,20 +1000,46 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
919 u8 rx_ptype; 1000 u8 rx_ptype;
920 u64 qword; 1001 u64 qword;
921 1002
922 rx_desc = I40E_RX_DESC(rx_ring, i); 1003 do {
923 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
924 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
925 I40E_RXD_QW1_STATUS_SHIFT;
926
927 while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
928 union i40e_rx_desc *next_rxd;
929 struct i40e_rx_buffer *rx_bi; 1004 struct i40e_rx_buffer *rx_bi;
930 struct sk_buff *skb; 1005 struct sk_buff *skb;
931 u16 vlan_tag; 1006 u16 vlan_tag;
1007 /* return some buffers to hardware, one at a time is too slow */
1008 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1009 i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count);
1010 cleaned_count = 0;
1011 }
1012
1013 i = rx_ring->next_to_clean;
1014 rx_desc = I40E_RX_DESC(rx_ring, i);
1015 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1016 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1017 I40E_RXD_QW1_STATUS_SHIFT;
1018
1019 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
1020 break;
1021
1022 /* This memory barrier is needed to keep us from reading
1023 * any other fields out of the rx_desc until we know the
1024 * DD bit is set.
1025 */
1026 rmb();
932 rx_bi = &rx_ring->rx_bi[i]; 1027 rx_bi = &rx_ring->rx_bi[i];
933 skb = rx_bi->skb; 1028 skb = rx_bi->skb;
934 prefetch(skb->data); 1029 if (likely(!skb)) {
935 1030 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1031 rx_ring->rx_hdr_len);
1032 if (!skb)
1033 rx_ring->rx_stats.alloc_buff_failed++;
1034 /* initialize queue mapping */
1035 skb_record_rx_queue(skb, rx_ring->queue_index);
1036 /* we are reusing so sync this buffer for CPU use */
1037 dma_sync_single_range_for_cpu(rx_ring->dev,
1038 rx_bi->dma,
1039 0,
1040 rx_ring->rx_hdr_len,
1041 DMA_FROM_DEVICE);
1042 }
936 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> 1043 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
937 I40E_RXD_QW1_LENGTH_PBUF_SHIFT; 1044 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
938 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> 1045 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
@@ -947,40 +1054,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
947 1054
948 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> 1055 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
949 I40E_RXD_QW1_PTYPE_SHIFT; 1056 I40E_RXD_QW1_PTYPE_SHIFT;
1057 prefetch(rx_bi->page);
950 rx_bi->skb = NULL; 1058 rx_bi->skb = NULL;
951 1059 cleaned_count++;
952 /* This memory barrier is needed to keep us from reading 1060 if (rx_hbo || rx_sph) {
953 * any other fields out of the rx_desc until we know the 1061 int len;
954 * STATUS_DD bit is set
955 */
956 rmb();
957
958 /* Get the header and possibly the whole packet
959 * If this is an skb from previous receive dma will be 0
960 */
961 if (rx_bi->dma) {
962 u16 len;
963
964 if (rx_hbo) 1062 if (rx_hbo)
965 len = I40E_RX_HDR_SIZE; 1063 len = I40E_RX_HDR_SIZE;
966 else if (rx_sph)
967 len = rx_header_len;
968 else if (rx_packet_len)
969 len = rx_packet_len; /* 1buf/no split found */
970 else 1064 else
971 len = rx_header_len; /* split always mode */ 1065 len = rx_header_len;
972 1066 memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
973 skb_put(skb, len); 1067 } else if (skb->len == 0) {
974 dma_unmap_single(rx_ring->dev, 1068 int len;
975 rx_bi->dma, 1069
976 rx_ring->rx_buf_len, 1070 len = (rx_packet_len > skb_headlen(skb) ?
977 DMA_FROM_DEVICE); 1071 skb_headlen(skb) : rx_packet_len);
978 rx_bi->dma = 0; 1072 memcpy(__skb_put(skb, len),
1073 rx_bi->page + rx_bi->page_offset,
1074 len);
1075 rx_bi->page_offset += len;
1076 rx_packet_len -= len;
979 } 1077 }
980 1078
981 /* Get the rest of the data if this was a header split */ 1079 /* Get the rest of the data if this was a header split */
982 if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { 1080 if (rx_packet_len) {
983
984 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 1081 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
985 rx_bi->page, 1082 rx_bi->page,
986 rx_bi->page_offset, 1083 rx_bi->page_offset,
@@ -1002,22 +1099,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1002 DMA_FROM_DEVICE); 1099 DMA_FROM_DEVICE);
1003 rx_bi->page_dma = 0; 1100 rx_bi->page_dma = 0;
1004 } 1101 }
1005 I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); 1102 I40E_RX_INCREMENT(rx_ring, i);
1006 1103
1007 if (unlikely( 1104 if (unlikely(
1008 !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { 1105 !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1009 struct i40e_rx_buffer *next_buffer; 1106 struct i40e_rx_buffer *next_buffer;
1010 1107
1011 next_buffer = &rx_ring->rx_bi[i]; 1108 next_buffer = &rx_ring->rx_bi[i];
1012 1109 next_buffer->skb = skb;
1013 if (ring_is_ps_enabled(rx_ring)) {
1014 rx_bi->skb = next_buffer->skb;
1015 rx_bi->dma = next_buffer->dma;
1016 next_buffer->skb = skb;
1017 next_buffer->dma = 0;
1018 }
1019 rx_ring->rx_stats.non_eop_descs++; 1110 rx_ring->rx_stats.non_eop_descs++;
1020 goto next_desc; 1111 continue;
1021 } 1112 }
1022 1113
1023 /* ERR_MASK will only have valid bits if EOP set */ 1114 /* ERR_MASK will only have valid bits if EOP set */
@@ -1026,7 +1117,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1026 /* TODO: shouldn't we increment a counter indicating the 1117 /* TODO: shouldn't we increment a counter indicating the
1027 * drop? 1118 * drop?
1028 */ 1119 */
1029 goto next_desc; 1120 continue;
1030 } 1121 }
1031 1122
1032 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), 1123 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
@@ -1042,30 +1133,134 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1042 vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) 1133 vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1043 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) 1134 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1044 : 0; 1135 : 0;
1136#ifdef I40E_FCOE
1137 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1138 dev_kfree_skb_any(skb);
1139 continue;
1140 }
1141#endif
1142 skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
1045 i40e_receive_skb(rx_ring, skb, vlan_tag); 1143 i40e_receive_skb(rx_ring, skb, vlan_tag);
1046 1144
1047 rx_ring->netdev->last_rx = jiffies; 1145 rx_ring->netdev->last_rx = jiffies;
1048 budget--;
1049next_desc:
1050 rx_desc->wb.qword1.status_error_len = 0; 1146 rx_desc->wb.qword1.status_error_len = 0;
1051 if (!budget)
1052 break;
1053 1147
1054 cleaned_count++; 1148 } while (likely(total_rx_packets < budget));
1149
1150 u64_stats_update_begin(&rx_ring->syncp);
1151 rx_ring->stats.packets += total_rx_packets;
1152 rx_ring->stats.bytes += total_rx_bytes;
1153 u64_stats_update_end(&rx_ring->syncp);
1154 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1155 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1156
1157 return total_rx_packets;
1158}
1159
1160/**
1161 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
1162 * @rx_ring: rx ring to clean
1163 * @budget: how many cleans we're allowed
1164 *
1165 * Returns number of packets cleaned
1166 **/
1167static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
1168{
1169 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1170 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1171 struct i40e_vsi *vsi = rx_ring->vsi;
1172 union i40e_rx_desc *rx_desc;
1173 u32 rx_error, rx_status;
1174 u16 rx_packet_len;
1175 u8 rx_ptype;
1176 u64 qword;
1177 u16 i;
1178
1179 do {
1180 struct i40e_rx_buffer *rx_bi;
1181 struct sk_buff *skb;
1182 u16 vlan_tag;
1055 /* return some buffers to hardware, one at a time is too slow */ 1183 /* return some buffers to hardware, one at a time is too slow */
1056 if (cleaned_count >= I40E_RX_BUFFER_WRITE) { 1184 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1057 i40evf_alloc_rx_buffers(rx_ring, cleaned_count); 1185 i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
1058 cleaned_count = 0; 1186 cleaned_count = 0;
1059 } 1187 }
1060 1188
1061 /* use prefetched values */ 1189 i = rx_ring->next_to_clean;
1062 rx_desc = next_rxd; 1190 rx_desc = I40E_RX_DESC(rx_ring, i);
1063 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 1191 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1064 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> 1192 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1065 I40E_RXD_QW1_STATUS_SHIFT; 1193 I40E_RXD_QW1_STATUS_SHIFT;
1066 } 1194
1195 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
1196 break;
1197
1198 /* This memory barrier is needed to keep us from reading
1199 * any other fields out of the rx_desc until we know the
1200 * DD bit is set.
1201 */
1202 rmb();
1203
1204 rx_bi = &rx_ring->rx_bi[i];
1205 skb = rx_bi->skb;
1206 prefetch(skb->data);
1207
1208 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1209 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1210
1211 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1212 I40E_RXD_QW1_ERROR_SHIFT;
1213 rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
1214
1215 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1216 I40E_RXD_QW1_PTYPE_SHIFT;
1217 rx_bi->skb = NULL;
1218 cleaned_count++;
1219
1220 /* Pull the whole packet into the skb posted for this
1221 * descriptor; the dma mapping is always valid in this path.
1222 */
1223 skb_put(skb, rx_packet_len);
1224 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
1225 DMA_FROM_DEVICE);
1226 rx_bi->dma = 0;
1227
1228 I40E_RX_INCREMENT(rx_ring, i);
1229
1230 if (unlikely(
1231 !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1232 rx_ring->rx_stats.non_eop_descs++;
1233 continue;
1234 }
1235
1236 /* ERR_MASK will only have valid bits if EOP set */
1237 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1238 dev_kfree_skb_any(skb);
1239 /* TODO: shouldn't we increment a counter indicating the
1240 * drop?
1241 */
1242 continue;
1243 }
1244
1245 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1246 i40e_ptype_to_hash(rx_ptype));
1247 /* probably a little skewed due to removing CRC */
1248 total_rx_bytes += skb->len;
1249 total_rx_packets++;
1250
1251 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1252
1253 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1254
1255 vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1256 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1257 : 0;
1258 i40e_receive_skb(rx_ring, skb, vlan_tag);
1259
1260 rx_ring->netdev->last_rx = jiffies;
1261 rx_desc->wb.qword1.status_error_len = 0;
1262 } while (likely(total_rx_packets < budget));
1067 1263
1068 rx_ring->next_to_clean = i;
1069 u64_stats_update_begin(&rx_ring->syncp); 1264 u64_stats_update_begin(&rx_ring->syncp);
1070 rx_ring->stats.packets += total_rx_packets; 1265 rx_ring->stats.packets += total_rx_packets;
1071 rx_ring->stats.bytes += total_rx_bytes; 1266 rx_ring->stats.bytes += total_rx_bytes;
@@ -1073,10 +1268,7 @@ next_desc:
1073 rx_ring->q_vector->rx.total_packets += total_rx_packets; 1268 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1074 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; 1269 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1075 1270
1076 if (cleaned_count) 1271 return total_rx_packets;
1077 i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
1078
1079 return budget > 0;
1080} 1272}
1081 1273
1082/** 1274/**
@@ -1097,6 +1289,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
1097 bool clean_complete = true; 1289 bool clean_complete = true;
1098 bool arm_wb = false; 1290 bool arm_wb = false;
1099 int budget_per_ring; 1291 int budget_per_ring;
1292 int cleaned;
1100 1293
1101 if (test_bit(__I40E_DOWN, &vsi->state)) { 1294 if (test_bit(__I40E_DOWN, &vsi->state)) {
1102 napi_complete(napi); 1295 napi_complete(napi);
@@ -1116,8 +1309,14 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
1116 */ 1309 */
1117 budget_per_ring = max(budget/q_vector->num_ringpairs, 1); 1310 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1118 1311
1119 i40e_for_each_ring(ring, q_vector->rx) 1312 i40e_for_each_ring(ring, q_vector->rx) {
1120 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); 1313 if (ring_is_ps_enabled(ring))
1314 cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
1315 else
1316 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
1317 /* if we didn't clean as many as budgeted, we must be done */
1318 clean_complete &= (budget_per_ring != cleaned);
1319 }
1121 1320
1122 /* If work not completed, return budget and polling will return */ 1321 /* If work not completed, return budget and polling will return */
1123 if (!clean_complete) { 1322 if (!clean_complete) {
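The i40evf hunks above replace per-descriptor header mappings with one coherent DMA allocation, carved into 256-byte-aligned slices whose addresses land in hdr_buf/dma; the clean loop then memcpy()s split headers out of those slices. A condensed model of the carving, assuming kernel context; hdr_slice and hdr_pool_init are illustrative names.

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct hdr_slice {
	void *cpu;	/* CPU address of this descriptor's header slot */
	dma_addr_t dma;	/* matching bus address programmed into hdr_addr */
};

static int hdr_pool_init(struct device *dev, struct hdr_slice *s,
			 int count, int hdr_len)
{
	/* 256-byte stride keeps slices from sharing cache lines */
	int stride = ALIGN(hdr_len, 256);
	dma_addr_t dma;
	void *buf;
	int i;

	buf = dma_alloc_coherent(dev, (size_t)stride * count, &dma,
				 GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		s[i].cpu = buf + (size_t)i * stride;
		s[i].dma = dma + (size_t)i * stride;
	}
	return 0;
}

Because the whole pool is freed as one block, only the base address and total size need to be retained, which is exactly why i40evf_clean_rx_ring above frees from rx_bi[0] and then clears the per-buffer pointers.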
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 4e15903b2b6d..ffdda716813e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -96,6 +96,14 @@ enum i40e_dyn_idx_t {
96 96
97/* How many Rx Buffers do we bundle into one write to the hardware ? */ 97/* How many Rx Buffers do we bundle into one write to the hardware ? */
98#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 98#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
99#define I40E_RX_INCREMENT(r, i) \
100 do { \
101 (i)++; \
102 if ((i) == (r)->count) \
103 i = 0; \
104 r->next_to_clean = i; \
105 } while (0)
106
99#define I40E_RX_NEXT_DESC(r, i, n) \ 107#define I40E_RX_NEXT_DESC(r, i, n) \
100 do { \ 108 do { \
101 (i)++; \ 109 (i)++; \
@@ -150,6 +158,7 @@ struct i40e_tx_buffer {
150 158
151struct i40e_rx_buffer { 159struct i40e_rx_buffer {
152 struct sk_buff *skb; 160 struct sk_buff *skb;
161 void *hdr_buf;
153 dma_addr_t dma; 162 dma_addr_t dma;
154 struct page *page; 163 struct page *page;
155 dma_addr_t page_dma; 164 dma_addr_t page_dma;
@@ -222,8 +231,8 @@ struct i40e_ring {
222 u16 rx_buf_len; 231 u16 rx_buf_len;
223 u8 dtype; 232 u8 dtype;
224#define I40E_RX_DTYPE_NO_SPLIT 0 233#define I40E_RX_DTYPE_NO_SPLIT 0
225#define I40E_RX_DTYPE_SPLIT_ALWAYS 1 234#define I40E_RX_DTYPE_HEADER_SPLIT 1
226#define I40E_RX_DTYPE_HEADER_SPLIT 2 235#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
227 u8 hsplit; 236 u8 hsplit;
228#define I40E_RX_SPLIT_L2 0x1 237#define I40E_RX_SPLIT_L2 0x1
229#define I40E_RX_SPLIT_IP 0x2 238#define I40E_RX_SPLIT_IP 0x2
@@ -277,7 +286,9 @@ struct i40e_ring_container {
277#define i40e_for_each_ring(pos, head) \ 286#define i40e_for_each_ring(pos, head) \
278 for (pos = (head).ring; pos != NULL; pos = pos->next) 287 for (pos = (head).ring; pos != NULL; pos = pos->next)
279 288
280void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); 289void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
290void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
291void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
281netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 292netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
282void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); 293void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
283void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); 294void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
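[Note] The I40E_RX_INCREMENT macro added above both advances the clean cursor and publishes it to the ring's next_to_clean in a single expansion. A small sketch of its effect, assuming this header is in scope (the values in the comment are made up):

        /* With r->count == 512 and i == 511, the cursor wraps to 0 and
         * r->next_to_clean is updated in the same step.
         */
        static void rx_increment_sketch(struct i40e_ring *r)
        {
                u16 i = r->next_to_clean;

                I40E_RX_INCREMENT(r, i);        /* i = (i + 1) % r->count; publish */
        }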
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 3d0fdaab5cc8..c8cd8afdbf8f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -175,7 +175,6 @@ struct i40e_link_status {
175 u8 an_info; 175 u8 an_info;
176 u8 ext_info; 176 u8 ext_info;
177 u8 loopback; 177 u8 loopback;
178 bool an_enabled;
179 /* is Link Status Event notification to SW enabled */ 178 /* is Link Status Event notification to SW enabled */
180 bool lse_enable; 179 bool lse_enable;
181 u16 max_frame_size; 180 u16 max_frame_size;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index e0c8208138f4..59f62f0e65dd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -59,31 +59,29 @@
59 * of the virtchnl_msg structure. 59 * of the virtchnl_msg structure.
60 */ 60 */
61enum i40e_virtchnl_ops { 61enum i40e_virtchnl_ops {
62/* VF sends req. to pf for the following 62/* The PF sends status change events to VFs using
63 * ops. 63 * the I40E_VIRTCHNL_OP_EVENT opcode.
64 * VFs send requests to the PF using the other ops.
64 */ 65 */
65 I40E_VIRTCHNL_OP_UNKNOWN = 0, 66 I40E_VIRTCHNL_OP_UNKNOWN = 0,
66 I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ 67 I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
67 I40E_VIRTCHNL_OP_RESET_VF, 68 I40E_VIRTCHNL_OP_RESET_VF = 2,
68 I40E_VIRTCHNL_OP_GET_VF_RESOURCES, 69 I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
69 I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, 70 I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
70 I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, 71 I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
71 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, 72 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
72 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, 73 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
73 I40E_VIRTCHNL_OP_ENABLE_QUEUES, 74 I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
74 I40E_VIRTCHNL_OP_DISABLE_QUEUES, 75 I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
75 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, 76 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
76 I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, 77 I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
77 I40E_VIRTCHNL_OP_ADD_VLAN, 78 I40E_VIRTCHNL_OP_ADD_VLAN = 12,
78 I40E_VIRTCHNL_OP_DEL_VLAN, 79 I40E_VIRTCHNL_OP_DEL_VLAN = 13,
79 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 80 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
80 I40E_VIRTCHNL_OP_GET_STATS, 81 I40E_VIRTCHNL_OP_GET_STATS = 15,
81 I40E_VIRTCHNL_OP_FCOE, 82 I40E_VIRTCHNL_OP_FCOE = 16,
82 I40E_VIRTCHNL_OP_CONFIG_RSS, 83 I40E_VIRTCHNL_OP_EVENT = 17,
83/* PF sends status change events to vfs using 84 I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
84 * the following op.
85 */
86 I40E_VIRTCHNL_OP_EVENT,
87}; 85};
88 86
89/* Virtual channel message descriptor. This overlays the admin queue 87/* Virtual channel message descriptor. This overlays the admin queue
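[Note] Pinning each opcode to an explicit value makes the VF-PF channel a stable wire protocol: the PF and VF are built independently, so it is the number, not the symbol, that travels. A hypothetical receive-side classifier illustrating the split the new comment describes:

        /* Hypothetical check; only I40E_VIRTCHNL_OP_EVENT (17) flows
         * PF -> VF, every other defined opcode is a VF -> PF request.
         */
        static int vf_opcode_is_request(enum i40e_virtchnl_ops op)
        {
                return op != I40E_VIRTCHNL_OP_UNKNOWN &&
                       op != I40E_VIRTCHNL_OP_EVENT;
        }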
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 8d8c201c63c1..e089e8f98413 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
36static const char i40evf_driver_string[] = 36static const char i40evf_driver_string[] =
37 "Intel(R) XL710/X710 Virtual Function Network Driver"; 37 "Intel(R) XL710/X710 Virtual Function Network Driver";
38 38
39#define DRV_VERSION "1.2.0" 39#define DRV_VERSION "1.2.2"
40const char i40evf_driver_version[] = DRV_VERSION; 40const char i40evf_driver_version[] = DRV_VERSION;
41static const char i40evf_copyright[] = 41static const char i40evf_copyright[] =
42 "Copyright (c) 2013 - 2014 Intel Corporation."; 42 "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -761,13 +761,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
761 u8 *macaddr) 761 u8 *macaddr)
762{ 762{
763 struct i40evf_mac_filter *f; 763 struct i40evf_mac_filter *f;
764 int count = 50;
764 765
765 if (!macaddr) 766 if (!macaddr)
766 return NULL; 767 return NULL;
767 768
768 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, 769 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
769 &adapter->crit_section)) 770 &adapter->crit_section)) {
770 udelay(1); 771 udelay(1);
772 if (--count == 0)
773 return NULL;
774 }
771 775
772 f = i40evf_find_filter(adapter, macaddr); 776 f = i40evf_find_filter(adapter, macaddr);
773 if (!f) { 777 if (!f) {
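[Note] The count added above turns an unbounded spin on the critical-section bit into a bounded one. The same pattern as a generic helper (hypothetical; kernel context assumed for test_and_set_bit() and udelay()):

        /* Spin for at most 'retries' microseconds on a bit lock, then fail
         * instead of hanging the caller; clear_bit() releases on success.
         */
        static bool bounded_bit_lock(int bit, unsigned long *addr, int retries)
        {
                while (test_and_set_bit(bit, addr)) {
                        udelay(1);
                        if (--retries == 0)
                                return false;
                }
                return true;
        }

Here i40evf_add_filter() maps the failure to a NULL return, the same path callers already take for an allocation failure.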
@@ -828,6 +832,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
828 struct i40evf_mac_filter *f, *ftmp; 832 struct i40evf_mac_filter *f, *ftmp;
829 struct netdev_hw_addr *uca; 833 struct netdev_hw_addr *uca;
830 struct netdev_hw_addr *mca; 834 struct netdev_hw_addr *mca;
835 int count = 50;
831 836
832 /* add addr if not already in the filter list */ 837 /* add addr if not already in the filter list */
833 netdev_for_each_uc_addr(uca, netdev) { 838 netdev_for_each_uc_addr(uca, netdev) {
@@ -838,8 +843,14 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
838 } 843 }
839 844
840 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, 845 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
841 &adapter->crit_section)) 846 &adapter->crit_section)) {
842 udelay(1); 847 udelay(1);
848 if (--count == 0) {
849 dev_err(&adapter->pdev->dev,
850 "Failed to get lock in %s\n", __func__);
851 return;
852 }
853 }
843 /* remove filter if not in netdev list */ 854 /* remove filter if not in netdev list */
844 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { 855 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
845 bool found = false; 856 bool found = false;
@@ -920,7 +931,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
920 for (i = 0; i < adapter->num_active_queues; i++) { 931 for (i = 0; i < adapter->num_active_queues; i++) {
921 struct i40e_ring *ring = adapter->rx_rings[i]; 932 struct i40e_ring *ring = adapter->rx_rings[i];
922 933
923 i40evf_alloc_rx_buffers(ring, ring->count); 934 i40evf_alloc_rx_buffers_1buf(ring, ring->count);
924 ring->next_to_use = ring->count - 1; 935 ring->next_to_use = ring->count - 1;
925 writel(ring->next_to_use, ring->tail); 936 writel(ring->next_to_use, ring->tail);
926 } 937 }
@@ -959,6 +970,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
959 usleep_range(500, 1000); 970 usleep_range(500, 1000);
960 971
961 i40evf_irq_disable(adapter); 972 i40evf_irq_disable(adapter);
973 i40evf_napi_disable_all(adapter);
962 974
963 /* remove all MAC filters */ 975 /* remove all MAC filters */
964 list_for_each_entry(f, &adapter->mac_filter_list, list) { 976 list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -985,8 +997,6 @@ void i40evf_down(struct i40evf_adapter *adapter)
985 997
986 netif_tx_stop_all_queues(netdev); 998 netif_tx_stop_all_queues(netdev);
987 999
988 i40evf_napi_disable_all(adapter);
989
990 msleep(20); 1000 msleep(20);
991 1001
992 netif_carrier_off(netdev); 1002 netif_carrier_off(netdev);
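[Note] The two hunks above move i40evf_napi_disable_all() up to sit directly after i40evf_irq_disable() in i40evf_down(), so no poll can still be running while filters and queues are unwound later in the function. Condensed shape of the resulting order (sketch only; all calls are the driver's own):

        static void down_order_sketch(struct i40evf_adapter *adapter)
        {
                i40evf_irq_disable(adapter);            /* no new interrupts */
                i40evf_napi_disable_all(adapter);       /* no in-flight polls */
                /* ... mark MAC/VLAN filters for removal, stop Tx queues ... */
                netif_carrier_off(adapter->netdev);
        }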
@@ -1481,9 +1491,11 @@ static void i40evf_reset_task(struct work_struct *work)
1481 struct i40evf_adapter *adapter = container_of(work, 1491 struct i40evf_adapter *adapter = container_of(work,
1482 struct i40evf_adapter, 1492 struct i40evf_adapter,
1483 reset_task); 1493 reset_task);
1494 struct net_device *netdev = adapter->netdev;
1484 struct i40e_hw *hw = &adapter->hw; 1495 struct i40e_hw *hw = &adapter->hw;
1485 int i = 0, err; 1496 struct i40evf_mac_filter *f;
1486 uint32_t rstat_val; 1497 uint32_t rstat_val;
1498 int i = 0, err;
1487 1499
1488 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, 1500 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
1489 &adapter->crit_section)) 1501 &adapter->crit_section))
@@ -1528,7 +1540,11 @@ static void i40evf_reset_task(struct work_struct *work)
1528 1540
1529 if (netif_running(adapter->netdev)) { 1541 if (netif_running(adapter->netdev)) {
1530 set_bit(__I40E_DOWN, &adapter->vsi.state); 1542 set_bit(__I40E_DOWN, &adapter->vsi.state);
1531 i40evf_down(adapter); 1543 i40evf_irq_disable(adapter);
1544 i40evf_napi_disable_all(adapter);
1545 netif_tx_disable(netdev);
1546 netif_tx_stop_all_queues(netdev);
1547 netif_carrier_off(netdev);
1532 i40evf_free_traffic_irqs(adapter); 1548 i40evf_free_traffic_irqs(adapter);
1533 i40evf_free_all_tx_resources(adapter); 1549 i40evf_free_all_tx_resources(adapter);
1534 i40evf_free_all_rx_resources(adapter); 1550 i40evf_free_all_rx_resources(adapter);
@@ -1560,22 +1576,37 @@ static void i40evf_reset_task(struct work_struct *work)
1560continue_reset: 1576continue_reset:
1561 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; 1577 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
1562 1578
1563 i40evf_down(adapter); 1579 i40evf_irq_disable(adapter);
1580 i40evf_napi_disable_all(adapter);
1581
1582 netif_tx_disable(netdev);
1583
1584 netif_tx_stop_all_queues(netdev);
1585
1586 netif_carrier_off(netdev);
1564 adapter->state = __I40EVF_RESETTING; 1587 adapter->state = __I40EVF_RESETTING;
1565 1588
1566 /* kill and reinit the admin queue */ 1589 /* kill and reinit the admin queue */
1567 if (i40evf_shutdown_adminq(hw)) 1590 if (i40evf_shutdown_adminq(hw))
1568 dev_warn(&adapter->pdev->dev, 1591 dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n");
1569 "%s: Failed to destroy the Admin Queue resources\n", 1592 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1570 __func__);
1571 err = i40evf_init_adminq(hw); 1593 err = i40evf_init_adminq(hw);
1572 if (err) 1594 if (err)
1573 dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n", 1595 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
1574 __func__, err); 1596 err);
1575 1597
1576 adapter->aq_pending = 0;
1577 adapter->aq_required = 0;
1578 i40evf_map_queues(adapter); 1598 i40evf_map_queues(adapter);
1599
1600 /* re-add all MAC filters */
1601 list_for_each_entry(f, &adapter->mac_filter_list, list) {
1602 f->add = true;
1603 }
1604 /* re-add all VLAN filters */
1605 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
1606 f->add = true;
1607 }
1608 adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
1609 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
1579 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); 1610 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1580 1611
1581 mod_timer(&adapter->watchdog_timer, jiffies + 2); 1612 mod_timer(&adapter->watchdog_timer, jiffies + 2);
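[Note] The hunk above open-codes the down sequence in the reset path and then marks every filter for re-add, presumably because the PF side loses them across the reset. The mac and vlan lists hold different structure types, so the safe form uses a separately typed cursor for each list; a sketch (struct i40evf_vlan_filter is assumed from the driver header rather than shown in this diff):

        static void mark_filters_for_readd(struct i40evf_adapter *adapter)
        {
                struct i40evf_mac_filter *f;
                struct i40evf_vlan_filter *vlf;

                list_for_each_entry(f, &adapter->mac_filter_list, list)
                        f->add = true;
                list_for_each_entry(vlf, &adapter->vlan_filter_list, list)
                        vlf->add = true;

                /* tell the watchdog to replay both lists to the PF */
                adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER |
                                       I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
        }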
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index d9fa999b1685..ae3f28332fa0 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -29,94 +28,93 @@
29#define _E1000_DEFINES_H_ 28#define _E1000_DEFINES_H_
30 29
31/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ 30/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
32#define REQ_TX_DESCRIPTOR_MULTIPLE 8 31#define REQ_TX_DESCRIPTOR_MULTIPLE 8
33#define REQ_RX_DESCRIPTOR_MULTIPLE 8 32#define REQ_RX_DESCRIPTOR_MULTIPLE 8
34 33
35/* IVAR valid bit */ 34/* IVAR valid bit */
36#define E1000_IVAR_VALID 0x80 35#define E1000_IVAR_VALID 0x80
37 36
38/* Receive Descriptor bit definitions */ 37/* Receive Descriptor bit definitions */
39#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 38#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
40#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ 39#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
41#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ 40#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
42#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 41#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
43#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ 42#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
44#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ 43#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
45#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ 44#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
46#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ 45#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
47#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ 46#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
48 47
49#define E1000_RXDEXT_STATERR_LB 0x00040000 48#define E1000_RXDEXT_STATERR_LB 0x00040000
50#define E1000_RXDEXT_STATERR_CE 0x01000000 49#define E1000_RXDEXT_STATERR_CE 0x01000000
51#define E1000_RXDEXT_STATERR_SE 0x02000000 50#define E1000_RXDEXT_STATERR_SE 0x02000000
52#define E1000_RXDEXT_STATERR_SEQ 0x04000000 51#define E1000_RXDEXT_STATERR_SEQ 0x04000000
53#define E1000_RXDEXT_STATERR_CXE 0x10000000 52#define E1000_RXDEXT_STATERR_CXE 0x10000000
54#define E1000_RXDEXT_STATERR_TCPE 0x20000000 53#define E1000_RXDEXT_STATERR_TCPE 0x20000000
55#define E1000_RXDEXT_STATERR_IPE 0x40000000 54#define E1000_RXDEXT_STATERR_IPE 0x40000000
56#define E1000_RXDEXT_STATERR_RXE 0x80000000 55#define E1000_RXDEXT_STATERR_RXE 0x80000000
57
58 56
59/* Same mask, but for extended and packet split descriptors */ 57/* Same mask, but for extended and packet split descriptors */
60#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ 58#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
61 E1000_RXDEXT_STATERR_CE | \ 59 E1000_RXDEXT_STATERR_CE | \
62 E1000_RXDEXT_STATERR_SE | \ 60 E1000_RXDEXT_STATERR_SE | \
63 E1000_RXDEXT_STATERR_SEQ | \ 61 E1000_RXDEXT_STATERR_SEQ | \
64 E1000_RXDEXT_STATERR_CXE | \ 62 E1000_RXDEXT_STATERR_CXE | \
65 E1000_RXDEXT_STATERR_RXE) 63 E1000_RXDEXT_STATERR_RXE)
66 64
67/* Device Control */ 65/* Device Control */
68#define E1000_CTRL_RST 0x04000000 /* Global reset */ 66#define E1000_CTRL_RST 0x04000000 /* Global reset */
69 67
70/* Device Status */ 68/* Device Status */
 71#define E1000_STATUS_FD 0x00000001 /* Full duplex; 0=half, 1=full */ 69#define E1000_STATUS_FD 0x00000001 /* Full duplex; 0=half, 1=full */
 72#define E1000_STATUS_LU 0x00000002 /* Link up; 0=no, 1=link */ 70#define E1000_STATUS_LU 0x00000002 /* Link up; 0=no, 1=link */
73#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ 71#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
74#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ 72#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
75#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ 73#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
76#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ 74#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
77 75
78#define SPEED_10 10 76#define SPEED_10 10
79#define SPEED_100 100 77#define SPEED_100 100
80#define SPEED_1000 1000 78#define SPEED_1000 1000
81#define HALF_DUPLEX 1 79#define HALF_DUPLEX 1
82#define FULL_DUPLEX 2 80#define FULL_DUPLEX 2
83 81
84/* Transmit Descriptor bit definitions */ 82/* Transmit Descriptor bit definitions */
85#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ 83#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
86#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ 84#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
87#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ 85#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
88#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ 86#define E1000_TXD_STAT_DD 0x00000001 /* Desc Done */
89 87
90#define MAX_JUMBO_FRAME_SIZE 0x3F00 88#define MAX_JUMBO_FRAME_SIZE 0x3F00
91 89
92/* 802.1q VLAN Packet Size */ 90/* 802.1q VLAN Packet Size */
93#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ 91#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
94 92
95/* Error Codes */ 93/* Error Codes */
96#define E1000_SUCCESS 0 94#define E1000_SUCCESS 0
97#define E1000_ERR_CONFIG 3 95#define E1000_ERR_CONFIG 3
98#define E1000_ERR_MAC_INIT 5 96#define E1000_ERR_MAC_INIT 5
99#define E1000_ERR_MBX 15 97#define E1000_ERR_MBX 15
100 98
101/* SRRCTL bit definitions */ 99/* SRRCTL bit definitions */
102#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ 100#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
103#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 101#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
104#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ 102#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
105#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 103#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
106#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 104#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
107#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 105#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
108#define E1000_SRRCTL_DROP_EN 0x80000000 106#define E1000_SRRCTL_DROP_EN 0x80000000
109 107
110#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F 108#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
111#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 109#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
112 110
113/* Additional Descriptor Control definitions */ 111/* Additional Descriptor Control definitions */
114#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ 112#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Que */
115#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ 113#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */
116 114
117/* Direct Cache Access (DCA) definitions */ 115/* Direct Cache Access (DCA) definitions */
118#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ 116#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
119 117
120#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ 118#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
121 119
122#endif /* _E1000_DEFINES_H_ */ 120#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 2178f87e9f61..c6996feb1cb4 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -36,7 +35,6 @@
36#include "igbvf.h" 35#include "igbvf.h"
37#include <linux/if_vlan.h> 36#include <linux/if_vlan.h>
38 37
39
40struct igbvf_stats { 38struct igbvf_stats {
41 char stat_string[ETH_GSTRING_LEN]; 39 char stat_string[ETH_GSTRING_LEN];
42 int sizeof_stat; 40 int sizeof_stat;
@@ -74,7 +72,7 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
74#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) 72#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
75 73
76static int igbvf_get_settings(struct net_device *netdev, 74static int igbvf_get_settings(struct net_device *netdev,
77 struct ethtool_cmd *ecmd) 75 struct ethtool_cmd *ecmd)
78{ 76{
79 struct igbvf_adapter *adapter = netdev_priv(netdev); 77 struct igbvf_adapter *adapter = netdev_priv(netdev);
80 struct e1000_hw *hw = &adapter->hw; 78 struct e1000_hw *hw = &adapter->hw;
@@ -111,18 +109,18 @@ static int igbvf_get_settings(struct net_device *netdev,
111} 109}
112 110
113static int igbvf_set_settings(struct net_device *netdev, 111static int igbvf_set_settings(struct net_device *netdev,
114 struct ethtool_cmd *ecmd) 112 struct ethtool_cmd *ecmd)
115{ 113{
116 return -EOPNOTSUPP; 114 return -EOPNOTSUPP;
117} 115}
118 116
119static void igbvf_get_pauseparam(struct net_device *netdev, 117static void igbvf_get_pauseparam(struct net_device *netdev,
120 struct ethtool_pauseparam *pause) 118 struct ethtool_pauseparam *pause)
121{ 119{
122} 120}
123 121
124static int igbvf_set_pauseparam(struct net_device *netdev, 122static int igbvf_set_pauseparam(struct net_device *netdev,
125 struct ethtool_pauseparam *pause) 123 struct ethtool_pauseparam *pause)
126{ 124{
127 return -EOPNOTSUPP; 125 return -EOPNOTSUPP;
128} 126}
@@ -130,12 +128,14 @@ static int igbvf_set_pauseparam(struct net_device *netdev,
130static u32 igbvf_get_msglevel(struct net_device *netdev) 128static u32 igbvf_get_msglevel(struct net_device *netdev)
131{ 129{
132 struct igbvf_adapter *adapter = netdev_priv(netdev); 130 struct igbvf_adapter *adapter = netdev_priv(netdev);
131
133 return adapter->msg_enable; 132 return adapter->msg_enable;
134} 133}
135 134
136static void igbvf_set_msglevel(struct net_device *netdev, u32 data) 135static void igbvf_set_msglevel(struct net_device *netdev, u32 data)
137{ 136{
138 struct igbvf_adapter *adapter = netdev_priv(netdev); 137 struct igbvf_adapter *adapter = netdev_priv(netdev);
138
139 adapter->msg_enable = data; 139 adapter->msg_enable = data;
140} 140}
141 141
@@ -146,7 +146,7 @@ static int igbvf_get_regs_len(struct net_device *netdev)
146} 146}
147 147
148static void igbvf_get_regs(struct net_device *netdev, 148static void igbvf_get_regs(struct net_device *netdev,
149 struct ethtool_regs *regs, void *p) 149 struct ethtool_regs *regs, void *p)
150{ 150{
151 struct igbvf_adapter *adapter = netdev_priv(netdev); 151 struct igbvf_adapter *adapter = netdev_priv(netdev);
152 struct e1000_hw *hw = &adapter->hw; 152 struct e1000_hw *hw = &adapter->hw;
@@ -175,19 +175,19 @@ static int igbvf_get_eeprom_len(struct net_device *netdev)
175} 175}
176 176
177static int igbvf_get_eeprom(struct net_device *netdev, 177static int igbvf_get_eeprom(struct net_device *netdev,
178 struct ethtool_eeprom *eeprom, u8 *bytes) 178 struct ethtool_eeprom *eeprom, u8 *bytes)
179{ 179{
180 return -EOPNOTSUPP; 180 return -EOPNOTSUPP;
181} 181}
182 182
183static int igbvf_set_eeprom(struct net_device *netdev, 183static int igbvf_set_eeprom(struct net_device *netdev,
184 struct ethtool_eeprom *eeprom, u8 *bytes) 184 struct ethtool_eeprom *eeprom, u8 *bytes)
185{ 185{
186 return -EOPNOTSUPP; 186 return -EOPNOTSUPP;
187} 187}
188 188
189static void igbvf_get_drvinfo(struct net_device *netdev, 189static void igbvf_get_drvinfo(struct net_device *netdev,
190 struct ethtool_drvinfo *drvinfo) 190 struct ethtool_drvinfo *drvinfo)
191{ 191{
192 struct igbvf_adapter *adapter = netdev_priv(netdev); 192 struct igbvf_adapter *adapter = netdev_priv(netdev);
193 193
@@ -201,7 +201,7 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
201} 201}
202 202
203static void igbvf_get_ringparam(struct net_device *netdev, 203static void igbvf_get_ringparam(struct net_device *netdev,
204 struct ethtool_ringparam *ring) 204 struct ethtool_ringparam *ring)
205{ 205{
206 struct igbvf_adapter *adapter = netdev_priv(netdev); 206 struct igbvf_adapter *adapter = netdev_priv(netdev);
207 struct igbvf_ring *tx_ring = adapter->tx_ring; 207 struct igbvf_ring *tx_ring = adapter->tx_ring;
@@ -214,7 +214,7 @@ static void igbvf_get_ringparam(struct net_device *netdev,
214} 214}
215 215
216static int igbvf_set_ringparam(struct net_device *netdev, 216static int igbvf_set_ringparam(struct net_device *netdev,
217 struct ethtool_ringparam *ring) 217 struct ethtool_ringparam *ring)
218{ 218{
219 struct igbvf_adapter *adapter = netdev_priv(netdev); 219 struct igbvf_adapter *adapter = netdev_priv(netdev);
220 struct igbvf_ring *temp_ring; 220 struct igbvf_ring *temp_ring;
@@ -224,12 +224,12 @@ static int igbvf_set_ringparam(struct net_device *netdev,
224 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 224 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
225 return -EINVAL; 225 return -EINVAL;
226 226
227 new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD); 227 new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD);
228 new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD); 228 new_rx_count = min_t(u32, new_rx_count, IGBVF_MAX_RXD);
229 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); 229 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
230 230
231 new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD); 231 new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD);
232 new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD); 232 new_tx_count = min_t(u32, new_tx_count, IGBVF_MAX_TXD);
233 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); 233 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
234 234
235 if ((new_tx_count == adapter->tx_ring->count) && 235 if ((new_tx_count == adapter->tx_ring->count) &&
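[Note] ring->rx_pending arrives as u32 while the IGBVF_MIN/MAX constants are plain ints; min_t()/max_t() make the comparison type explicit instead of casting at each call site. The whole clamp as one helper (hypothetical name, kernel context assumed for min_t/max_t/ALIGN):

        static u32 clamp_ring_count(u32 requested, u32 lo, u32 hi, u32 multiple)
        {
                u32 count = max_t(u32, requested, lo);  /* enforce minimum */

                count = min_t(u32, count, hi);          /* enforce maximum */
                return ALIGN(count, multiple);          /* descriptor multiple */
        }

e.g. clamp_ring_count(ring->rx_pending, IGBVF_MIN_RXD, IGBVF_MAX_RXD, REQ_RX_DESCRIPTOR_MULTIPLE).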
@@ -239,7 +239,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
239 } 239 }
240 240
241 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) 241 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
242 msleep(1); 242 usleep_range(1000, 2000);
243 243
244 if (!netif_running(adapter->netdev)) { 244 if (!netif_running(adapter->netdev)) {
245 adapter->tx_ring->count = new_tx_count; 245 adapter->tx_ring->count = new_tx_count;
@@ -255,10 +255,9 @@ static int igbvf_set_ringparam(struct net_device *netdev,
255 255
256 igbvf_down(adapter); 256 igbvf_down(adapter);
257 257
258 /* 258 /* We can't just free everything and then setup again,
259 * We can't just free everything and then setup again,
260 * because the ISRs in MSI-X mode get passed pointers 259 * because the ISRs in MSI-X mode get passed pointers
261 * to the tx and rx ring structs. 260 * to the Tx and Rx ring structs.
262 */ 261 */
263 if (new_tx_count != adapter->tx_ring->count) { 262 if (new_tx_count != adapter->tx_ring->count) {
264 memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); 263 memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));
@@ -283,7 +282,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
283 282
284 igbvf_free_rx_resources(adapter->rx_ring); 283 igbvf_free_rx_resources(adapter->rx_ring);
285 284
286 memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); 285 memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring));
287 } 286 }
288err_setup: 287err_setup:
289 igbvf_up(adapter); 288 igbvf_up(adapter);
@@ -307,14 +306,13 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
307} 306}
308 307
309static void igbvf_diag_test(struct net_device *netdev, 308static void igbvf_diag_test(struct net_device *netdev,
310 struct ethtool_test *eth_test, u64 *data) 309 struct ethtool_test *eth_test, u64 *data)
311{ 310{
312 struct igbvf_adapter *adapter = netdev_priv(netdev); 311 struct igbvf_adapter *adapter = netdev_priv(netdev);
313 312
314 set_bit(__IGBVF_TESTING, &adapter->state); 313 set_bit(__IGBVF_TESTING, &adapter->state);
315 314
316 /* 315 /* Link test performed before hardware reset so autoneg doesn't
317 * Link test performed before hardware reset so autoneg doesn't
318 * interfere with test result 316 * interfere with test result
319 */ 317 */
320 if (igbvf_link_test(adapter, &data[0])) 318 if (igbvf_link_test(adapter, &data[0]))
@@ -325,20 +323,20 @@ static void igbvf_diag_test(struct net_device *netdev,
325} 323}
326 324
327static void igbvf_get_wol(struct net_device *netdev, 325static void igbvf_get_wol(struct net_device *netdev,
328 struct ethtool_wolinfo *wol) 326 struct ethtool_wolinfo *wol)
329{ 327{
330 wol->supported = 0; 328 wol->supported = 0;
331 wol->wolopts = 0; 329 wol->wolopts = 0;
332} 330}
333 331
334static int igbvf_set_wol(struct net_device *netdev, 332static int igbvf_set_wol(struct net_device *netdev,
335 struct ethtool_wolinfo *wol) 333 struct ethtool_wolinfo *wol)
336{ 334{
337 return -EOPNOTSUPP; 335 return -EOPNOTSUPP;
338} 336}
339 337
340static int igbvf_get_coalesce(struct net_device *netdev, 338static int igbvf_get_coalesce(struct net_device *netdev,
341 struct ethtool_coalesce *ec) 339 struct ethtool_coalesce *ec)
342{ 340{
343 struct igbvf_adapter *adapter = netdev_priv(netdev); 341 struct igbvf_adapter *adapter = netdev_priv(netdev);
344 342
@@ -351,13 +349,13 @@ static int igbvf_get_coalesce(struct net_device *netdev,
351} 349}
352 350
353static int igbvf_set_coalesce(struct net_device *netdev, 351static int igbvf_set_coalesce(struct net_device *netdev,
354 struct ethtool_coalesce *ec) 352 struct ethtool_coalesce *ec)
355{ 353{
356 struct igbvf_adapter *adapter = netdev_priv(netdev); 354 struct igbvf_adapter *adapter = netdev_priv(netdev);
357 struct e1000_hw *hw = &adapter->hw; 355 struct e1000_hw *hw = &adapter->hw;
358 356
359 if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) && 357 if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
360 (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { 358 (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
361 adapter->current_itr = ec->rx_coalesce_usecs << 2; 359 adapter->current_itr = ec->rx_coalesce_usecs << 2;
362 adapter->requested_itr = 1000000000 / 360 adapter->requested_itr = 1000000000 /
363 (adapter->current_itr * 256); 361 (adapter->current_itr * 256);
@@ -366,8 +364,7 @@ static int igbvf_set_coalesce(struct net_device *netdev,
366 adapter->current_itr = IGBVF_START_ITR; 364 adapter->current_itr = IGBVF_START_ITR;
367 adapter->requested_itr = ec->rx_coalesce_usecs; 365 adapter->requested_itr = ec->rx_coalesce_usecs;
368 } else if (ec->rx_coalesce_usecs == 0) { 366 } else if (ec->rx_coalesce_usecs == 0) {
369 /* 367 /* The user's desire is to turn off interrupt throttling
370 * The user's desire is to turn off interrupt throttling
371 * altogether, but due to HW limitations, we can't do that. 368 * altogether, but due to HW limitations, we can't do that.
372 * Instead we set a very small value in EITR, which would 369 * Instead we set a very small value in EITR, which would
 373 * allow ~976k interrupts per second, but allow the adapter's 370 * allow ~976k interrupts per second, but allow the adapter's
@@ -376,8 +373,9 @@ static int igbvf_set_coalesce(struct net_device *netdev,
376 adapter->current_itr = 4; 373 adapter->current_itr = 4;
377 adapter->requested_itr = 1000000000 / 374 adapter->requested_itr = 1000000000 /
378 (adapter->current_itr * 256); 375 (adapter->current_itr * 256);
379 } else 376 } else {
380 return -EINVAL; 377 return -EINVAL;
378 }
381 379
382 writel(adapter->current_itr, 380 writel(adapter->current_itr,
383 hw->hw_addr + adapter->rx_ring->itr_register); 381 hw->hw_addr + adapter->rx_ring->itr_register);
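[Note] The ITR conversion above is easy to sanity-check by hand: the formula implies each ITR unit is a 256 ns interval, so the reported rate is 10^9 / (itr * 256). A worked example (plain arithmetic, no driver state):

        static unsigned int itr_to_ints_per_sec(unsigned int itr)
        {
                return 1000000000u / (itr * 256u);
        }

        /* rx_coalesce_usecs = 10 -> itr = 10 << 2 = 40 -> ~97,656 ints/sec
         * itr = 4 (the "off" case)                     -> ~976,562 ints/sec
         */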
@@ -388,15 +386,15 @@ static int igbvf_set_coalesce(struct net_device *netdev,
388static int igbvf_nway_reset(struct net_device *netdev) 386static int igbvf_nway_reset(struct net_device *netdev)
389{ 387{
390 struct igbvf_adapter *adapter = netdev_priv(netdev); 388 struct igbvf_adapter *adapter = netdev_priv(netdev);
389
391 if (netif_running(netdev)) 390 if (netif_running(netdev))
392 igbvf_reinit_locked(adapter); 391 igbvf_reinit_locked(adapter);
393 return 0; 392 return 0;
394} 393}
395 394
396
397static void igbvf_get_ethtool_stats(struct net_device *netdev, 395static void igbvf_get_ethtool_stats(struct net_device *netdev,
398 struct ethtool_stats *stats, 396 struct ethtool_stats *stats,
399 u64 *data) 397 u64 *data)
400{ 398{
401 struct igbvf_adapter *adapter = netdev_priv(netdev); 399 struct igbvf_adapter *adapter = netdev_priv(netdev);
402 int i; 400 int i;
@@ -404,19 +402,18 @@ static void igbvf_get_ethtool_stats(struct net_device *netdev,
404 igbvf_update_stats(adapter); 402 igbvf_update_stats(adapter);
405 for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { 403 for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
406 char *p = (char *)adapter + 404 char *p = (char *)adapter +
407 igbvf_gstrings_stats[i].stat_offset; 405 igbvf_gstrings_stats[i].stat_offset;
408 char *b = (char *)adapter + 406 char *b = (char *)adapter +
409 igbvf_gstrings_stats[i].base_stat_offset; 407 igbvf_gstrings_stats[i].base_stat_offset;
410 data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == 408 data[i] = ((igbvf_gstrings_stats[i].sizeof_stat ==
411 sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : 409 sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
412 (*(u32 *)p - *(u32 *)b)); 410 (*(u32 *)p - *(u32 *)b));
413 } 411 }
414
415} 412}
416 413
417static int igbvf_get_sset_count(struct net_device *dev, int stringset) 414static int igbvf_get_sset_count(struct net_device *dev, int stringset)
418{ 415{
419 switch(stringset) { 416 switch (stringset) {
420 case ETH_SS_TEST: 417 case ETH_SS_TEST:
421 return IGBVF_TEST_LEN; 418 return IGBVF_TEST_LEN;
422 case ETH_SS_STATS: 419 case ETH_SS_STATS:
@@ -427,7 +424,7 @@ static int igbvf_get_sset_count(struct net_device *dev, int stringset)
427} 424}
428 425
429static void igbvf_get_strings(struct net_device *netdev, u32 stringset, 426static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
430 u8 *data) 427 u8 *data)
431{ 428{
432 u8 *p = data; 429 u8 *p = data;
433 int i; 430 int i;
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 7d6a25c8f889..f166baab8d7e 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -43,10 +42,10 @@ struct igbvf_info;
43struct igbvf_adapter; 42struct igbvf_adapter;
44 43
45/* Interrupt defines */ 44/* Interrupt defines */
46#define IGBVF_START_ITR 488 /* ~8000 ints/sec */ 45#define IGBVF_START_ITR 488 /* ~8000 ints/sec */
47#define IGBVF_4K_ITR 980 46#define IGBVF_4K_ITR 980
48#define IGBVF_20K_ITR 196 47#define IGBVF_20K_ITR 196
49#define IGBVF_70K_ITR 56 48#define IGBVF_70K_ITR 56
50 49
51enum latency_range { 50enum latency_range {
52 lowest_latency = 0, 51 lowest_latency = 0,
@@ -55,56 +54,55 @@ enum latency_range {
55 latency_invalid = 255 54 latency_invalid = 255
56}; 55};
57 56
58
59/* Interrupt modes, as used by the IntMode parameter */ 57/* Interrupt modes, as used by the IntMode parameter */
60#define IGBVF_INT_MODE_LEGACY 0 58#define IGBVF_INT_MODE_LEGACY 0
61#define IGBVF_INT_MODE_MSI 1 59#define IGBVF_INT_MODE_MSI 1
62#define IGBVF_INT_MODE_MSIX 2 60#define IGBVF_INT_MODE_MSIX 2
63 61
64/* Tx/Rx descriptor defines */ 62/* Tx/Rx descriptor defines */
65#define IGBVF_DEFAULT_TXD 256 63#define IGBVF_DEFAULT_TXD 256
66#define IGBVF_MAX_TXD 4096 64#define IGBVF_MAX_TXD 4096
67#define IGBVF_MIN_TXD 80 65#define IGBVF_MIN_TXD 80
68 66
69#define IGBVF_DEFAULT_RXD 256 67#define IGBVF_DEFAULT_RXD 256
70#define IGBVF_MAX_RXD 4096 68#define IGBVF_MAX_RXD 4096
71#define IGBVF_MIN_RXD 80 69#define IGBVF_MIN_RXD 80
72 70
73#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ 71#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
74#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ 72#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
75 73
76/* RX descriptor control thresholds. 74/* RX descriptor control thresholds.
77 * PTHRESH - MAC will consider prefetch if it has fewer than this number of 75 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
78 * descriptors available in its onboard memory. 76 * descriptors available in its onboard memory.
79 * Setting this to 0 disables RX descriptor prefetch. 77 * Setting this to 0 disables RX descriptor prefetch.
80 * HTHRESH - MAC will only prefetch if there are at least this many descriptors 78 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
81 * available in host memory. 79 * available in host memory.
82 * If PTHRESH is 0, this should also be 0. 80 * If PTHRESH is 0, this should also be 0.
83 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back 81 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
84 * descriptors until either it has this many to write back, or the 82 * descriptors until either it has this many to write back, or the
85 * ITR timer expires. 83 * ITR timer expires.
86 */ 84 */
87#define IGBVF_RX_PTHRESH 16 85#define IGBVF_RX_PTHRESH 16
88#define IGBVF_RX_HTHRESH 8 86#define IGBVF_RX_HTHRESH 8
89#define IGBVF_RX_WTHRESH 1 87#define IGBVF_RX_WTHRESH 1
90 88
91/* this is the size past which hardware will drop packets when setting LPE=0 */ 89/* this is the size past which hardware will drop packets when setting LPE=0 */
92#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 90#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
93 91
94#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ 92#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */
95 93
96/* How many Tx Descriptors do we need to call netif_wake_queue ? */ 94/* How many Tx Descriptors do we need to call netif_wake_queue ? */
97#define IGBVF_TX_QUEUE_WAKE 32 95#define IGBVF_TX_QUEUE_WAKE 32
98/* How many Rx Buffers do we bundle into one write to the hardware ? */ 96/* How many Rx Buffers do we bundle into one write to the hardware ? */
99#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 97#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
100 98
101#define AUTO_ALL_MODES 0 99#define AUTO_ALL_MODES 0
102#define IGBVF_EEPROM_APME 0x0400 100#define IGBVF_EEPROM_APME 0x0400
103 101
104#define IGBVF_MNG_VLAN_NONE (-1) 102#define IGBVF_MNG_VLAN_NONE (-1)
105 103
106/* Number of packet split data buffers (not including the header buffer) */ 104/* Number of packet split data buffers (not including the header buffer) */
107#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) 105#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
108 106
109enum igbvf_boards { 107enum igbvf_boards {
110 board_vf, 108 board_vf,
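[Note] The PTHRESH/HTHRESH/WTHRESH comment above describes values that ultimately land in a per-queue RXDCTL register. A sketch of that packing; the bit positions below are assumed from the igb family layout, not taken from this patch:

        /* Assumed igb-style layout: PTHRESH bits 4:0, HTHRESH bits 12:8,
         * WTHRESH bits 20:16; E1000_RXDCTL_QUEUE_ENABLE is from defines.h.
         */
        static u32 build_rxdctl_sketch(u32 pthresh, u32 hthresh, u32 wthresh)
        {
                return (pthresh & 0x1f) |
                       ((hthresh & 0x1f) << 8) |
                       ((wthresh & 0x1f) << 16) |
                       E1000_RXDCTL_QUEUE_ENABLE;
        }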
@@ -116,8 +114,7 @@ struct igbvf_queue_stats {
116 u64 bytes; 114 u64 bytes;
117}; 115};
118 116
119/* 117/* wrappers around a pointer to a socket buffer,
120 * wrappers around a pointer to a socket buffer,
121 * so a DMA handle can be stored along with the buffer 118 * so a DMA handle can be stored along with the buffer
122 */ 119 */
123struct igbvf_buffer { 120struct igbvf_buffer {
@@ -148,10 +145,10 @@ union igbvf_desc {
148 145
149struct igbvf_ring { 146struct igbvf_ring {
150 struct igbvf_adapter *adapter; /* backlink */ 147 struct igbvf_adapter *adapter; /* backlink */
151 union igbvf_desc *desc; /* pointer to ring memory */ 148 union igbvf_desc *desc; /* pointer to ring memory */
152 dma_addr_t dma; /* phys address of ring */ 149 dma_addr_t dma; /* phys address of ring */
153 unsigned int size; /* length of ring in bytes */ 150 unsigned int size; /* length of ring in bytes */
154 unsigned int count; /* number of desc. in ring */ 151 unsigned int count; /* number of desc. in ring */
155 152
156 u16 next_to_use; 153 u16 next_to_use;
157 u16 next_to_clean; 154 u16 next_to_clean;
@@ -202,9 +199,7 @@ struct igbvf_adapter {
202 u32 requested_itr; /* ints/sec or adaptive */ 199 u32 requested_itr; /* ints/sec or adaptive */
203 u32 current_itr; /* Actual ITR register value, not ints/sec */ 200 u32 current_itr; /* Actual ITR register value, not ints/sec */
204 201
205 /* 202 /* Tx */
206 * Tx
207 */
208 struct igbvf_ring *tx_ring /* One per active queue */ 203 struct igbvf_ring *tx_ring /* One per active queue */
209 ____cacheline_aligned_in_smp; 204 ____cacheline_aligned_in_smp;
210 205
@@ -226,9 +221,7 @@ struct igbvf_adapter {
226 u32 tx_fifo_size; 221 u32 tx_fifo_size;
227 u32 tx_dma_failed; 222 u32 tx_dma_failed;
228 223
229 /* 224 /* Rx */
230 * Rx
231 */
232 struct igbvf_ring *rx_ring; 225 struct igbvf_ring *rx_ring;
233 226
234 u32 rx_int_delay; 227 u32 rx_int_delay;
@@ -249,7 +242,7 @@ struct igbvf_adapter {
249 struct net_device *netdev; 242 struct net_device *netdev;
250 struct pci_dev *pdev; 243 struct pci_dev *pdev;
251 struct net_device_stats net_stats; 244 struct net_device_stats net_stats;
252 spinlock_t stats_lock; /* prevent concurrent stats updates */ 245 spinlock_t stats_lock; /* prevent concurrent stats updates */
253 246
254 /* structs defined in e1000_hw.h */ 247 /* structs defined in e1000_hw.h */
255 struct e1000_hw hw; 248 struct e1000_hw hw;
@@ -286,16 +279,16 @@ struct igbvf_adapter {
286}; 279};
287 280
288struct igbvf_info { 281struct igbvf_info {
289 enum e1000_mac_type mac; 282 enum e1000_mac_type mac;
290 unsigned int flags; 283 unsigned int flags;
291 u32 pba; 284 u32 pba;
292 void (*init_ops)(struct e1000_hw *); 285 void (*init_ops)(struct e1000_hw *);
293 s32 (*get_variants)(struct igbvf_adapter *); 286 s32 (*get_variants)(struct igbvf_adapter *);
294}; 287};
295 288
296/* hardware capability, feature, and workaround flags */ 289/* hardware capability, feature, and workaround flags */
297#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) 290#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
298#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) 291#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1)
299#define IGBVF_RX_DESC_ADV(R, i) \ 292#define IGBVF_RX_DESC_ADV(R, i) \
300 (&((((R).desc))[i].rx_desc)) 293 (&((((R).desc))[i].rx_desc))
301#define IGBVF_TX_DESC_ADV(R, i) \ 294#define IGBVF_TX_DESC_ADV(R, i) \
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index b4b65bc9fc5d..7b6cb4c3764c 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -54,10 +53,10 @@ out:
54} 53}
55 54
56/** 55/**
57 * e1000_poll_for_ack - Wait for message acknowledgement 56 * e1000_poll_for_ack - Wait for message acknowledgment
58 * @hw: pointer to the HW structure 57 * @hw: pointer to the HW structure
59 * 58 *
60 * returns SUCCESS if it successfully received a message acknowledgement 59 * returns SUCCESS if it successfully received a message acknowledgment
61 **/ 60 **/
62static s32 e1000_poll_for_ack(struct e1000_hw *hw) 61static s32 e1000_poll_for_ack(struct e1000_hw *hw)
63{ 62{
@@ -218,7 +217,7 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw)
218 s32 ret_val = -E1000_ERR_MBX; 217 s32 ret_val = -E1000_ERR_MBX;
219 218
220 if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | 219 if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
221 E1000_V2PMAILBOX_RSTI))) { 220 E1000_V2PMAILBOX_RSTI))) {
222 ret_val = E1000_SUCCESS; 221 ret_val = E1000_SUCCESS;
223 hw->mbx.stats.rsts++; 222 hw->mbx.stats.rsts++;
224 } 223 }
@@ -239,7 +238,7 @@ static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
239 /* Take ownership of the buffer */ 238 /* Take ownership of the buffer */
240 ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); 239 ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
241 240
242 /* reserve mailbox for vf use */ 241 /* reserve mailbox for VF use */
243 if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) 242 if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
244 ret_val = E1000_SUCCESS; 243 ret_val = E1000_SUCCESS;
245 244
@@ -283,7 +282,7 @@ out_no_write:
283} 282}
284 283
285/** 284/**
286 * e1000_read_mbx_vf - Reads a message from the inbox intended for vf 285 * e1000_read_mbx_vf - Reads a message from the inbox intended for VF
287 * @hw: pointer to the HW structure 286 * @hw: pointer to the HW structure
288 * @msg: The message buffer 287 * @msg: The message buffer
289 * @size: Length of buffer 288 * @size: Length of buffer
@@ -315,17 +314,18 @@ out_no_read:
315} 314}
316 315
317/** 316/**
318 * e1000_init_mbx_params_vf - set initial values for vf mailbox 317 * e1000_init_mbx_params_vf - set initial values for VF mailbox
319 * @hw: pointer to the HW structure 318 * @hw: pointer to the HW structure
320 * 319 *
321 * Initializes the hw->mbx struct to correct values for vf mailbox 320 * Initializes the hw->mbx struct to correct values for VF mailbox
322 */ 321 */
323s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) 322s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
324{ 323{
325 struct e1000_mbx_info *mbx = &hw->mbx; 324 struct e1000_mbx_info *mbx = &hw->mbx;
326 325
327 /* start mailbox as timed out and let the reset_hw call set the timeout 326 /* start mailbox as timed out and let the reset_hw call set the timeout
 328 * value to begin communications */ 327 * value to begin communications
 328 */
329 mbx->timeout = 0; 329 mbx->timeout = 0;
330 mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; 330 mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
331 331
@@ -347,4 +347,3 @@ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
347 347
348 return E1000_SUCCESS; 348 return E1000_SUCCESS;
349} 349}
350
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index 24370bcb0e22..f800bf8eedae 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -30,44 +29,44 @@
30 29
31#include "vf.h" 30#include "vf.h"
32 31
33#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ 32#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
34#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ 33#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */
35#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ 34#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
36#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ 35#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
37#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ 36#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
38#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ 37#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
39#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ 38#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */
40#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ 39#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
41#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ 40#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
42 41
 43#define E1000_VFMAILBOX_SIZE 16 /* 16 32-bit words - 64 bytes */ 42#define E1000_VFMAILBOX_SIZE 16 /* 16 32-bit words - 64 bytes */
44 43
 45/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the 44/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the
46 * PF. The reverse is true if it is E1000_PF_*. 45 * PF. The reverse is true if it is E1000_PF_*.
47 * Message ACK's are the value or'd with 0xF0000000 46 * Message ACK's are the value or'd with 0xF0000000
48 */ 47 */
49#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with 48/* Messages below or'd with this are the ACK */
50 * this are the ACK */ 49#define E1000_VT_MSGTYPE_ACK 0x80000000
51#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with 50/* Messages below or'd with this are the NACK */
52 * this are the NACK */ 51#define E1000_VT_MSGTYPE_NACK 0x40000000
53#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still 52/* Indicates that VF is still clear to send requests */
54 clear to send requests */ 53#define E1000_VT_MSGTYPE_CTS 0x20000000
55 54
 56/* We have a total wait time of 1s for VF mailbox posted messages */ 55/* We have a total wait time of 1s for VF mailbox posted messages */
57#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */ 56#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mbx timeout */
58#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ 57#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */
59 58
60#define E1000_VT_MSGINFO_SHIFT 16 59#define E1000_VT_MSGINFO_SHIFT 16
 61/* bits 23:16 are used for extra info for certain messages */ 60/* bits 23:16 are used for extra info for certain messages */
62#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) 61#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
63 62
64#define E1000_VF_RESET 0x01 /* VF requests reset */ 63#define E1000_VF_RESET 0x01 /* VF requests reset */
65#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ 64#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
66#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ 65#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
67#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ 66#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
68#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ 67#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
69 68
70#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ 69#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
71 70
72void e1000_init_mbx_ops_generic(struct e1000_hw *hw); 71void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
73s32 e1000_init_mbx_params_vf(struct e1000_hw *); 72s32 e1000_init_mbx_params_vf(struct e1000_hw *);
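[Note] The message-type bits above compose with the message value rather than replacing it. Two hypothetical helpers showing the intended use of the defines:

        /* An ACK echoes the original message with the ACK bit set, e.g.
         * E1000_VF_SET_MAC_ADDR (0x02) comes back as 0x80000002.
         */
        static u32 mbx_ack_for(u32 msg)
        {
                return msg | E1000_VT_MSGTYPE_ACK;
        }

        static bool mbx_pf_still_cts(u32 msg)
        {
                return (msg & E1000_VT_MSGTYPE_CTS) != 0;
        }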
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index ebf9d4a42fdd..c17ea4b8f84d 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -66,26 +65,27 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
66static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); 65static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
67 66
68static struct igbvf_info igbvf_vf_info = { 67static struct igbvf_info igbvf_vf_info = {
69 .mac = e1000_vfadapt, 68 .mac = e1000_vfadapt,
70 .flags = 0, 69 .flags = 0,
71 .pba = 10, 70 .pba = 10,
72 .init_ops = e1000_init_function_pointers_vf, 71 .init_ops = e1000_init_function_pointers_vf,
73}; 72};
74 73
75static struct igbvf_info igbvf_i350_vf_info = { 74static struct igbvf_info igbvf_i350_vf_info = {
76 .mac = e1000_vfadapt_i350, 75 .mac = e1000_vfadapt_i350,
77 .flags = 0, 76 .flags = 0,
78 .pba = 10, 77 .pba = 10,
79 .init_ops = e1000_init_function_pointers_vf, 78 .init_ops = e1000_init_function_pointers_vf,
80}; 79};
81 80
82static const struct igbvf_info *igbvf_info_tbl[] = { 81static const struct igbvf_info *igbvf_info_tbl[] = {
83 [board_vf] = &igbvf_vf_info, 82 [board_vf] = &igbvf_vf_info,
84 [board_i350_vf] = &igbvf_i350_vf_info, 83 [board_i350_vf] = &igbvf_i350_vf_info,
85}; 84};
86 85
87/** 86/**
88 * igbvf_desc_unused - calculate if we have unused descriptors 87 * igbvf_desc_unused - calculate if we have unused descriptors
 88 * @ring: address of receive ring structure
89 **/ 89 **/
90static int igbvf_desc_unused(struct igbvf_ring *ring) 90static int igbvf_desc_unused(struct igbvf_ring *ring)
91{ 91{
@@ -103,9 +103,9 @@ static int igbvf_desc_unused(struct igbvf_ring *ring)
103 * @skb: pointer to sk_buff to be indicated to stack 103 * @skb: pointer to sk_buff to be indicated to stack
104 **/ 104 **/
105static void igbvf_receive_skb(struct igbvf_adapter *adapter, 105static void igbvf_receive_skb(struct igbvf_adapter *adapter,
106 struct net_device *netdev, 106 struct net_device *netdev,
107 struct sk_buff *skb, 107 struct sk_buff *skb,
108 u32 status, u16 vlan) 108 u32 status, u16 vlan)
109{ 109{
110 u16 vid; 110 u16 vid;
111 111
@@ -123,7 +123,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
123} 123}
124 124
125static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, 125static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
126 u32 status_err, struct sk_buff *skb) 126 u32 status_err, struct sk_buff *skb)
127{ 127{
128 skb_checksum_none_assert(skb); 128 skb_checksum_none_assert(skb);
129 129
@@ -153,7 +153,7 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
153 * @cleaned_count: number of buffers to repopulate 153 * @cleaned_count: number of buffers to repopulate
154 **/ 154 **/
155static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, 155static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
156 int cleaned_count) 156 int cleaned_count)
157{ 157{
158 struct igbvf_adapter *adapter = rx_ring->adapter; 158 struct igbvf_adapter *adapter = rx_ring->adapter;
159 struct net_device *netdev = adapter->netdev; 159 struct net_device *netdev = adapter->netdev;
@@ -188,8 +188,8 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
188 } 188 }
189 buffer_info->page_dma = 189 buffer_info->page_dma =
190 dma_map_page(&pdev->dev, buffer_info->page, 190 dma_map_page(&pdev->dev, buffer_info->page,
191 buffer_info->page_offset, 191 buffer_info->page_offset,
192 PAGE_SIZE / 2, 192 PAGE_SIZE / 2,
193 DMA_FROM_DEVICE); 193 DMA_FROM_DEVICE);
194 if (dma_mapping_error(&pdev->dev, 194 if (dma_mapping_error(&pdev->dev,
195 buffer_info->page_dma)) { 195 buffer_info->page_dma)) {
@@ -209,7 +209,7 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
209 209
210 buffer_info->skb = skb; 210 buffer_info->skb = skb;
211 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, 211 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
212 bufsz, 212 bufsz,
213 DMA_FROM_DEVICE); 213 DMA_FROM_DEVICE);
214 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 214 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
215 dev_kfree_skb(buffer_info->skb); 215 dev_kfree_skb(buffer_info->skb);
@@ -219,14 +219,14 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
219 } 219 }
220 } 220 }
221 /* Refresh the desc even if buffer_addrs didn't change because 221 /* Refresh the desc even if buffer_addrs didn't change because
222 * each write-back erases this info. */ 222 * each write-back erases this info.
223 */
223 if (adapter->rx_ps_hdr_size) { 224 if (adapter->rx_ps_hdr_size) {
224 rx_desc->read.pkt_addr = 225 rx_desc->read.pkt_addr =
225 cpu_to_le64(buffer_info->page_dma); 226 cpu_to_le64(buffer_info->page_dma);
226 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); 227 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
227 } else { 228 } else {
228 rx_desc->read.pkt_addr = 229 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
229 cpu_to_le64(buffer_info->dma);
230 rx_desc->read.hdr_addr = 0; 230 rx_desc->read.hdr_addr = 0;
231 } 231 }
232 232
@@ -247,7 +247,8 @@ no_buffers:
247 /* Force memory writes to complete before letting h/w 247 /* Force memory writes to complete before letting h/w
248 * know there are new descriptors to fetch. (Only 248 * know there are new descriptors to fetch. (Only
249 * applicable for weak-ordered memory model archs, 249 * applicable for weak-ordered memory model archs,
250 * such as IA-64). */ 250 * such as IA-64).
251 */
251 wmb(); 252 wmb();
252 writel(i, adapter->hw.hw_addr + rx_ring->tail); 253 writel(i, adapter->hw.hw_addr + rx_ring->tail);
253 } 254 }
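
The wmb()-before-writel() pairing kept intact above is the standard descriptor-publish idiom: the ring entries live in ordinary cacheable memory while the tail bump is MMIO, so the barrier keeps the device from fetching stale descriptors on weakly ordered machines. A minimal sketch of that contract, with hypothetical ring/tail names:

#include <linux/io.h>

/* Hypothetical helper restating the publish pattern above. */
static void ring_publish_tail(void __iomem *tail_reg, u32 next_to_use)
{
	/* make all prior descriptor writes visible to the device before
	 * it can observe the new tail (matters on weak-ordering archs
	 * such as IA-64)
	 */
	wmb();
	writel(next_to_use, tail_reg);
}
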
@@ -261,7 +262,7 @@ no_buffers:
261 * is no guarantee that everything was cleaned 262 * is no guarantee that everything was cleaned
262 **/ 263 **/
263static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, 264static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
264 int *work_done, int work_to_do) 265 int *work_done, int work_to_do)
265{ 266{
266 struct igbvf_ring *rx_ring = adapter->rx_ring; 267 struct igbvf_ring *rx_ring = adapter->rx_ring;
267 struct net_device *netdev = adapter->netdev; 268 struct net_device *netdev = adapter->netdev;
@@ -292,8 +293,9 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
292 * that case, it fills the header buffer and spills the rest 293 * that case, it fills the header buffer and spills the rest
293 * into the page. 294 * into the page.
294 */ 295 */
295 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & 296 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
296 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; 297 & E1000_RXDADV_HDRBUFLEN_MASK) >>
298 E1000_RXDADV_HDRBUFLEN_SHIFT;
297 if (hlen > adapter->rx_ps_hdr_size) 299 if (hlen > adapter->rx_ps_hdr_size)
298 hlen = adapter->rx_ps_hdr_size; 300 hlen = adapter->rx_ps_hdr_size;
299 301
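
The reflowed expression above unpacks the header length that the hardware reports in the advanced Rx descriptor's hdr_info word. A standalone restatement, assuming the usual e1000 field layout (mask 0x7FE0, shift 5) and keeping the same clamp:

/* Restatement of the hlen extraction above; 0x7FE0 and 5 are assumed
 * to match E1000_RXDADV_HDRBUFLEN_MASK and _SHIFT.
 */
static u16 rx_split_hdr_len(u16 hdr_info, u16 rx_ps_hdr_size)
{
	u16 hlen = (hdr_info & 0x7FE0) >> 5;

	/* never trust the device past the header buffer size */
	return hlen > rx_ps_hdr_size ? rx_ps_hdr_size : hlen;
}
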
@@ -306,7 +308,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
306 buffer_info->skb = NULL; 308 buffer_info->skb = NULL;
307 if (!adapter->rx_ps_hdr_size) { 309 if (!adapter->rx_ps_hdr_size) {
308 dma_unmap_single(&pdev->dev, buffer_info->dma, 310 dma_unmap_single(&pdev->dev, buffer_info->dma,
309 adapter->rx_buffer_len, 311 adapter->rx_buffer_len,
310 DMA_FROM_DEVICE); 312 DMA_FROM_DEVICE);
311 buffer_info->dma = 0; 313 buffer_info->dma = 0;
312 skb_put(skb, length); 314 skb_put(skb, length);
@@ -315,21 +317,21 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
315 317
316 if (!skb_shinfo(skb)->nr_frags) { 318 if (!skb_shinfo(skb)->nr_frags) {
317 dma_unmap_single(&pdev->dev, buffer_info->dma, 319 dma_unmap_single(&pdev->dev, buffer_info->dma,
318 adapter->rx_ps_hdr_size, 320 adapter->rx_ps_hdr_size,
319 DMA_FROM_DEVICE); 321 DMA_FROM_DEVICE);
320 skb_put(skb, hlen); 322 skb_put(skb, hlen);
321 } 323 }
322 324
323 if (length) { 325 if (length) {
324 dma_unmap_page(&pdev->dev, buffer_info->page_dma, 326 dma_unmap_page(&pdev->dev, buffer_info->page_dma,
325 PAGE_SIZE / 2, 327 PAGE_SIZE / 2,
326 DMA_FROM_DEVICE); 328 DMA_FROM_DEVICE);
327 buffer_info->page_dma = 0; 329 buffer_info->page_dma = 0;
328 330
329 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 331 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
330 buffer_info->page, 332 buffer_info->page,
331 buffer_info->page_offset, 333 buffer_info->page_offset,
332 length); 334 length);
333 335
334 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || 336 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
335 (page_count(buffer_info->page) != 1)) 337 (page_count(buffer_info->page) != 1))
@@ -370,7 +372,7 @@ send_up:
370 skb->protocol = eth_type_trans(skb, netdev); 372 skb->protocol = eth_type_trans(skb, netdev);
371 373
372 igbvf_receive_skb(adapter, netdev, skb, staterr, 374 igbvf_receive_skb(adapter, netdev, skb, staterr,
373 rx_desc->wb.upper.vlan); 375 rx_desc->wb.upper.vlan);
374 376
375next_desc: 377next_desc:
376 rx_desc->wb.upper.status_error = 0; 378 rx_desc->wb.upper.status_error = 0;
@@ -402,7 +404,7 @@ next_desc:
402} 404}
403 405
404static void igbvf_put_txbuf(struct igbvf_adapter *adapter, 406static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
405 struct igbvf_buffer *buffer_info) 407 struct igbvf_buffer *buffer_info)
406{ 408{
407 if (buffer_info->dma) { 409 if (buffer_info->dma) {
408 if (buffer_info->mapped_as_page) 410 if (buffer_info->mapped_as_page)
@@ -431,7 +433,7 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
431 * Return 0 on success, negative on failure 433 * Return 0 on success, negative on failure
432 **/ 434 **/
433int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, 435int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
434 struct igbvf_ring *tx_ring) 436 struct igbvf_ring *tx_ring)
435{ 437{
436 struct pci_dev *pdev = adapter->pdev; 438 struct pci_dev *pdev = adapter->pdev;
437 int size; 439 int size;
@@ -458,7 +460,7 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
458err: 460err:
459 vfree(tx_ring->buffer_info); 461 vfree(tx_ring->buffer_info);
460 dev_err(&adapter->pdev->dev, 462 dev_err(&adapter->pdev->dev,
461 "Unable to allocate memory for the transmit descriptor ring\n"); 463 "Unable to allocate memory for the transmit descriptor ring\n");
462 return -ENOMEM; 464 return -ENOMEM;
463} 465}
464 466
@@ -501,7 +503,7 @@ err:
501 vfree(rx_ring->buffer_info); 503 vfree(rx_ring->buffer_info);
502 rx_ring->buffer_info = NULL; 504 rx_ring->buffer_info = NULL;
503 dev_err(&adapter->pdev->dev, 505 dev_err(&adapter->pdev->dev,
504 "Unable to allocate memory for the receive descriptor ring\n"); 506 "Unable to allocate memory for the receive descriptor ring\n");
505 return -ENOMEM; 507 return -ENOMEM;
506} 508}
507 509
@@ -578,13 +580,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
578 for (i = 0; i < rx_ring->count; i++) { 580 for (i = 0; i < rx_ring->count; i++) {
579 buffer_info = &rx_ring->buffer_info[i]; 581 buffer_info = &rx_ring->buffer_info[i];
580 if (buffer_info->dma) { 582 if (buffer_info->dma) {
581 if (adapter->rx_ps_hdr_size){ 583 if (adapter->rx_ps_hdr_size) {
582 dma_unmap_single(&pdev->dev, buffer_info->dma, 584 dma_unmap_single(&pdev->dev, buffer_info->dma,
583 adapter->rx_ps_hdr_size, 585 adapter->rx_ps_hdr_size,
584 DMA_FROM_DEVICE); 586 DMA_FROM_DEVICE);
585 } else { 587 } else {
586 dma_unmap_single(&pdev->dev, buffer_info->dma, 588 dma_unmap_single(&pdev->dev, buffer_info->dma,
587 adapter->rx_buffer_len, 589 adapter->rx_buffer_len,
588 DMA_FROM_DEVICE); 590 DMA_FROM_DEVICE);
589 } 591 }
590 buffer_info->dma = 0; 592 buffer_info->dma = 0;
@@ -599,7 +601,7 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
599 if (buffer_info->page_dma) 601 if (buffer_info->page_dma)
600 dma_unmap_page(&pdev->dev, 602 dma_unmap_page(&pdev->dev,
601 buffer_info->page_dma, 603 buffer_info->page_dma,
602 PAGE_SIZE / 2, 604 PAGE_SIZE / 2,
603 DMA_FROM_DEVICE); 605 DMA_FROM_DEVICE);
604 put_page(buffer_info->page); 606 put_page(buffer_info->page);
605 buffer_info->page = NULL; 607 buffer_info->page = NULL;
@@ -638,7 +640,7 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
638 rx_ring->buffer_info = NULL; 640 rx_ring->buffer_info = NULL;
639 641
640 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 642 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
641 rx_ring->dma); 643 rx_ring->dma);
642 rx_ring->desc = NULL; 644 rx_ring->desc = NULL;
643} 645}
644 646
@@ -649,13 +651,12 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
649 * @packets: the number of packets during this measurement interval 651 * @packets: the number of packets during this measurement interval
650 * @bytes: the number of bytes during this measurement interval 652 * @bytes: the number of bytes during this measurement interval
651 * 653 *
652 * Stores a new ITR value based on packets and byte 654 * Stores a new ITR value based on packets and byte counts during the last
653 * counts during the last interrupt. The advantage of per interrupt 655 * interrupt. The advantage of per interrupt computation is faster updates
654 * computation is faster updates and more accurate ITR for the current 656 * and more accurate ITR for the current traffic pattern. Constants in this
655 * traffic pattern. Constants in this function were computed 657 * function were computed based on theoretical maximum wire speed and thresholds
656 * based on theoretical maximum wire speed and thresholds were set based 658 * were set based on testing data as well as attempting to minimize response
657 * on testing data as well as attempting to minimize response time 659 * time while increasing bulk throughput.
658 * while increasing bulk throughput.
659 **/ 660 **/
660static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter, 661static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
661 enum latency_range itr_setting, 662 enum latency_range itr_setting,
@@ -744,17 +745,15 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
744 745
745 new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); 746 new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);
746 747
747
748 if (new_itr != adapter->tx_ring->itr_val) { 748 if (new_itr != adapter->tx_ring->itr_val) {
749 u32 current_itr = adapter->tx_ring->itr_val; 749 u32 current_itr = adapter->tx_ring->itr_val;
750 /* 750 /* this attempts to bias the interrupt rate towards Bulk
751 * this attempts to bias the interrupt rate towards Bulk
752 * by adding intermediate steps when interrupt rate is 751 * by adding intermediate steps when interrupt rate is
753 * increasing 752 * increasing
754 */ 753 */
755 new_itr = new_itr > current_itr ? 754 new_itr = new_itr > current_itr ?
756 min(current_itr + (new_itr >> 2), new_itr) : 755 min(current_itr + (new_itr >> 2), new_itr) :
757 new_itr; 756 new_itr;
758 adapter->tx_ring->itr_val = new_itr; 757 adapter->tx_ring->itr_val = new_itr;
759 758
760 adapter->tx_ring->set_itr = 1; 759 adapter->tx_ring->set_itr = 1;
@@ -772,9 +771,10 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
772 771
773 if (new_itr != adapter->rx_ring->itr_val) { 772 if (new_itr != adapter->rx_ring->itr_val) {
774 u32 current_itr = adapter->rx_ring->itr_val; 773 u32 current_itr = adapter->rx_ring->itr_val;
774
775 new_itr = new_itr > current_itr ? 775 new_itr = new_itr > current_itr ?
776 min(current_itr + (new_itr >> 2), new_itr) : 776 min(current_itr + (new_itr >> 2), new_itr) :
777 new_itr; 777 new_itr;
778 adapter->rx_ring->itr_val = new_itr; 778 adapter->rx_ring->itr_val = new_itr;
779 779
780 adapter->rx_ring->set_itr = 1; 780 adapter->rx_ring->set_itr = 1;
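
Both the Tx and Rx branches above apply the same damping rule: when the computed ITR value rises, step up by a quarter of the target per update (capped at the target) instead of jumping, which is what the "bias towards Bulk" comment describes; a falling value is taken immediately. As a standalone sketch:

/* Damping step shared by the Tx and Rx branches above. */
static u32 itr_damped_step(u32 current_itr, u32 new_itr)
{
	u32 stepped = current_itr + (new_itr >> 2);

	if (new_itr <= current_itr)
		return new_itr;		/* drop straight away */
	return stepped < new_itr ? stepped : new_itr;
}
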
@@ -829,7 +829,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
829 segs = skb_shinfo(skb)->gso_segs ?: 1; 829 segs = skb_shinfo(skb)->gso_segs ?: 1;
830 /* multiply data chunks by size of headers */ 830 /* multiply data chunks by size of headers */
831 bytecount = ((segs - 1) * skb_headlen(skb)) + 831 bytecount = ((segs - 1) * skb_headlen(skb)) +
832 skb->len; 832 skb->len;
833 total_packets += segs; 833 total_packets += segs;
834 total_bytes += bytecount; 834 total_bytes += bytecount;
835 } 835 }
@@ -849,9 +849,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
849 849
850 tx_ring->next_to_clean = i; 850 tx_ring->next_to_clean = i;
851 851
852 if (unlikely(count && 852 if (unlikely(count && netif_carrier_ok(netdev) &&
853 netif_carrier_ok(netdev) && 853 igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
854 igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
855 /* Make sure that anybody stopping the queue after this 854 /* Make sure that anybody stopping the queue after this
856 * sees the new next_to_clean. 855 * sees the new next_to_clean.
857 */ 856 */
@@ -902,8 +901,9 @@ static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
902 adapter->total_tx_bytes = 0; 901 adapter->total_tx_bytes = 0;
903 adapter->total_tx_packets = 0; 902 adapter->total_tx_packets = 0;
904 903
905 /* auto mask will automatically reenable the interrupt when we write 904 /* auto mask will automatically re-enable the interrupt when we write
906 * EICS */ 905 * EICS
906 */
907 if (!igbvf_clean_tx_irq(tx_ring)) 907 if (!igbvf_clean_tx_irq(tx_ring))
908 /* Ring was not completely cleaned, so fire another interrupt */ 908 /* Ring was not completely cleaned, so fire another interrupt */
909 ew32(EICS, tx_ring->eims_value); 909 ew32(EICS, tx_ring->eims_value);
@@ -941,15 +941,16 @@ static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
941#define IGBVF_NO_QUEUE -1 941#define IGBVF_NO_QUEUE -1
942 942
943static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, 943static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
944 int tx_queue, int msix_vector) 944 int tx_queue, int msix_vector)
945{ 945{
946 struct e1000_hw *hw = &adapter->hw; 946 struct e1000_hw *hw = &adapter->hw;
947 u32 ivar, index; 947 u32 ivar, index;
948 948
949 /* 82576 uses a table-based method for assigning vectors. 949 /* 82576 uses a table-based method for assigning vectors.
950 Each queue has a single entry in the table to which we write 950 * Each queue has a single entry in the table to which we write
951 a vector number along with a "valid" bit. Sadly, the layout 951 * a vector number along with a "valid" bit. Sadly, the layout
952 of the table is somewhat counterintuitive. */ 952 * of the table is somewhat counterintuitive.
953 */
953 if (rx_queue > IGBVF_NO_QUEUE) { 954 if (rx_queue > IGBVF_NO_QUEUE) {
954 index = (rx_queue >> 1); 955 index = (rx_queue >> 1);
955 ivar = array_er32(IVAR0, index); 956 ivar = array_er32(IVAR0, index);
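
The layout the reflowed comment calls counterintuitive packs two queues per 32-bit IVAR entry, one byte lane per cause, with the high bit of each lane as the valid flag. A sketch of the Rx half, assuming the conventional 0x80 valid bit and even/odd queues in bytes 0 and 2:

/* Rx lane selection for the IVAR table described above (assumed
 * layout: entry = queue >> 1, even queue -> byte 0, odd -> byte 2).
 */
static u32 ivar_with_rx_vector(u32 ivar, int rx_queue, u8 msix_vector)
{
	unsigned int shift = (rx_queue & 1) ? 16 : 0;

	ivar &= ~(0xFFu << shift);			/* clear the lane */
	ivar |= (u32)(msix_vector | 0x80) << shift;	/* vector + valid */
	return ivar;
}
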
@@ -984,6 +985,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
984 985
985/** 986/**
986 * igbvf_configure_msix - Configure MSI-X hardware 987 * igbvf_configure_msix - Configure MSI-X hardware
988 * @adapter: board private structure
987 * 989 *
988 * igbvf_configure_msix sets up the hardware to properly 990 * igbvf_configure_msix sets up the hardware to properly
989 * generate MSI-X interrupts. 991 * generate MSI-X interrupts.
@@ -1027,6 +1029,7 @@ static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
1027 1029
1028/** 1030/**
1029 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported 1031 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
1032 * @adapter: board private structure
1030 * 1033 *
1031 * Attempt to configure interrupts using the best available 1034 * Attempt to configure interrupts using the best available
1032 * capabilities of the hardware and kernel. 1035 * capabilities of the hardware and kernel.
@@ -1036,27 +1039,28 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
1036 int err = -ENOMEM; 1039 int err = -ENOMEM;
1037 int i; 1040 int i;
1038 1041
1039 /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */ 1042 /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */
1040 adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), 1043 adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
1041 GFP_KERNEL); 1044 GFP_KERNEL);
1042 if (adapter->msix_entries) { 1045 if (adapter->msix_entries) {
1043 for (i = 0; i < 3; i++) 1046 for (i = 0; i < 3; i++)
1044 adapter->msix_entries[i].entry = i; 1047 adapter->msix_entries[i].entry = i;
1045 1048
1046 err = pci_enable_msix_range(adapter->pdev, 1049 err = pci_enable_msix_range(adapter->pdev,
1047 adapter->msix_entries, 3, 3); 1050 adapter->msix_entries, 3, 3);
1048 } 1051 }
1049 1052
1050 if (err < 0) { 1053 if (err < 0) {
1051 /* MSI-X failed */ 1054 /* MSI-X failed */
1052 dev_err(&adapter->pdev->dev, 1055 dev_err(&adapter->pdev->dev,
1053 "Failed to initialize MSI-X interrupts.\n"); 1056 "Failed to initialize MSI-X interrupts.\n");
1054 igbvf_reset_interrupt_capability(adapter); 1057 igbvf_reset_interrupt_capability(adapter);
1055 } 1058 }
1056} 1059}
1057 1060
1058/** 1061/**
1059 * igbvf_request_msix - Initialize MSI-X interrupts 1062 * igbvf_request_msix - Initialize MSI-X interrupts
1063 * @adapter: board private structure
1060 * 1064 *
1061 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the 1065 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
1062 * kernel. 1066 * kernel.
@@ -1075,8 +1079,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
1075 } 1079 }
1076 1080
1077 err = request_irq(adapter->msix_entries[vector].vector, 1081 err = request_irq(adapter->msix_entries[vector].vector,
1078 igbvf_intr_msix_tx, 0, adapter->tx_ring->name, 1082 igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
1079 netdev); 1083 netdev);
1080 if (err) 1084 if (err)
1081 goto out; 1085 goto out;
1082 1086
@@ -1085,8 +1089,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
1085 vector++; 1089 vector++;
1086 1090
1087 err = request_irq(adapter->msix_entries[vector].vector, 1091 err = request_irq(adapter->msix_entries[vector].vector,
1088 igbvf_intr_msix_rx, 0, adapter->rx_ring->name, 1092 igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
1089 netdev); 1093 netdev);
1090 if (err) 1094 if (err)
1091 goto out; 1095 goto out;
1092 1096
@@ -1095,7 +1099,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
1095 vector++; 1099 vector++;
1096 1100
1097 err = request_irq(adapter->msix_entries[vector].vector, 1101 err = request_irq(adapter->msix_entries[vector].vector,
1098 igbvf_msix_other, 0, netdev->name, netdev); 1102 igbvf_msix_other, 0, netdev->name, netdev);
1099 if (err) 1103 if (err)
1100 goto out; 1104 goto out;
1101 1105
@@ -1130,6 +1134,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
1130 1134
1131/** 1135/**
1132 * igbvf_request_irq - initialize interrupts 1136 * igbvf_request_irq - initialize interrupts
1137 * @adapter: board private structure
1133 * 1138 *
1134 * Attempts to configure interrupts using the best available 1139 * Attempts to configure interrupts using the best available
1135 * capabilities of the hardware and kernel. 1140 * capabilities of the hardware and kernel.
@@ -1146,7 +1151,7 @@ static int igbvf_request_irq(struct igbvf_adapter *adapter)
1146 return err; 1151 return err;
1147 1152
1148 dev_err(&adapter->pdev->dev, 1153 dev_err(&adapter->pdev->dev,
1149 "Unable to allocate interrupt, Error: %d\n", err); 1154 "Unable to allocate interrupt, Error: %d\n", err);
1150 1155
1151 return err; 1156 return err;
1152} 1157}
@@ -1164,6 +1169,7 @@ static void igbvf_free_irq(struct igbvf_adapter *adapter)
1164 1169
1165/** 1170/**
1166 * igbvf_irq_disable - Mask off interrupt generation on the NIC 1171 * igbvf_irq_disable - Mask off interrupt generation on the NIC
1172 * @adapter: board private structure
1167 **/ 1173 **/
1168static void igbvf_irq_disable(struct igbvf_adapter *adapter) 1174static void igbvf_irq_disable(struct igbvf_adapter *adapter)
1169{ 1175{
@@ -1177,6 +1183,7 @@ static void igbvf_irq_disable(struct igbvf_adapter *adapter)
1177 1183
1178/** 1184/**
1179 * igbvf_irq_enable - Enable default interrupt generation settings 1185 * igbvf_irq_enable - Enable default interrupt generation settings
1186 * @adapter: board private structure
1180 **/ 1187 **/
1181static void igbvf_irq_enable(struct igbvf_adapter *adapter) 1188static void igbvf_irq_enable(struct igbvf_adapter *adapter)
1182{ 1189{
@@ -1252,7 +1259,7 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
1252 1259
1253 if (hw->mac.ops.set_vfta(hw, vid, false)) { 1260 if (hw->mac.ops.set_vfta(hw, vid, false)) {
1254 dev_err(&adapter->pdev->dev, 1261 dev_err(&adapter->pdev->dev,
1255 "Failed to remove vlan id %d\n", vid); 1262 "Failed to remove vlan id %d\n", vid);
1256 return -EINVAL; 1263 return -EINVAL;
1257 } 1264 }
1258 clear_bit(vid, adapter->active_vlans); 1265 clear_bit(vid, adapter->active_vlans);
@@ -1298,7 +1305,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
1298 1305
1299 /* Turn off Relaxed Ordering on head write-backs. The writebacks 1306 /* Turn off Relaxed Ordering on head write-backs. The writebacks
1300 * MUST be delivered in order or it will completely screw up 1307 * MUST be delivered in order or it will completely screw up
1301 * our bookeeping. 1308 * our bookkeeping.
1302 */ 1309 */
1303 dca_txctrl = er32(DCA_TXCTRL(0)); 1310 dca_txctrl = er32(DCA_TXCTRL(0));
1304 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; 1311 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
@@ -1325,15 +1332,15 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
1325 u32 srrctl = 0; 1332 u32 srrctl = 0;
1326 1333
1327 srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | 1334 srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
1328 E1000_SRRCTL_BSIZEHDR_MASK | 1335 E1000_SRRCTL_BSIZEHDR_MASK |
1329 E1000_SRRCTL_BSIZEPKT_MASK); 1336 E1000_SRRCTL_BSIZEPKT_MASK);
1330 1337
1331 /* Enable queue drop to avoid head of line blocking */ 1338 /* Enable queue drop to avoid head of line blocking */
1332 srrctl |= E1000_SRRCTL_DROP_EN; 1339 srrctl |= E1000_SRRCTL_DROP_EN;
1333 1340
1334 /* Setup buffer sizes */ 1341 /* Setup buffer sizes */
1335 srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> 1342 srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
1336 E1000_SRRCTL_BSIZEPKT_SHIFT; 1343 E1000_SRRCTL_BSIZEPKT_SHIFT;
1337 1344
1338 if (adapter->rx_buffer_len < 2048) { 1345 if (adapter->rx_buffer_len < 2048) {
1339 adapter->rx_ps_hdr_size = 0; 1346 adapter->rx_ps_hdr_size = 0;
@@ -1341,7 +1348,7 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
1341 } else { 1348 } else {
1342 adapter->rx_ps_hdr_size = 128; 1349 adapter->rx_ps_hdr_size = 128;
1343 srrctl |= adapter->rx_ps_hdr_size << 1350 srrctl |= adapter->rx_ps_hdr_size <<
1344 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 1351 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1345 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 1352 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1346 } 1353 }
1347 1354
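
The sizing above is unit arithmetic: SRRCTL's packet-buffer field is programmed in 1 KB units (hence align-to-1024 then shift right by 10), and header split only engages for buffers of 2 KB and larger, with a fixed 128-byte header buffer. Restated under those assumptions, with the header field's left-shift of 2 taken from the e1000 defines:

/* Restatement of the SRRCTL sizing above; shifts mirror the assumed
 * BSIZEPKT (>> 10) and BSIZEHDRSIZE (<< 2) definitions.
 */
static u32 srrctl_buffer_bits(u32 rx_buffer_len, u32 *rx_ps_hdr_size)
{
	u32 srrctl = ((rx_buffer_len + 1023) & ~1023u) >> 10;

	if (rx_buffer_len < 2048) {
		*rx_ps_hdr_size = 0;	/* no header split */
	} else {
		*rx_ps_hdr_size = 128;
		srrctl |= 128u << 2;	/* header buffer, 64-byte units */
	}
	return srrctl;
}
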
@@ -1369,8 +1376,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
1369 1376
1370 rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); 1377 rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
1371 1378
1372 /* 1379 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1373 * Setup the HW Rx Head and Tail Descriptor Pointers and
1374 * the Base and Length of the Rx Descriptor Ring 1380 * the Base and Length of the Rx Descriptor Ring
1375 */ 1381 */
1376 rdba = rx_ring->dma; 1382 rdba = rx_ring->dma;
@@ -1441,10 +1447,11 @@ static void igbvf_configure(struct igbvf_adapter *adapter)
1441 igbvf_setup_srrctl(adapter); 1447 igbvf_setup_srrctl(adapter);
1442 igbvf_configure_rx(adapter); 1448 igbvf_configure_rx(adapter);
1443 igbvf_alloc_rx_buffers(adapter->rx_ring, 1449 igbvf_alloc_rx_buffers(adapter->rx_ring,
1444 igbvf_desc_unused(adapter->rx_ring)); 1450 igbvf_desc_unused(adapter->rx_ring));
1445} 1451}
1446 1452
1447/* igbvf_reset - bring the hardware into a known good state 1453/* igbvf_reset - bring the hardware into a known good state
1454 * @adapter: private board structure
1448 * 1455 *
1449 * This function boots the hardware and enables some settings that 1456 * This function boots the hardware and enables some settings that
1450 * require a configuration cycle of the hardware - those cannot be 1457 * require a configuration cycle of the hardware - those cannot be
@@ -1494,7 +1501,6 @@ int igbvf_up(struct igbvf_adapter *adapter)
1494 hw->mac.get_link_status = 1; 1501 hw->mac.get_link_status = 1;
1495 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1502 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1496 1503
1497
1498 return 0; 1504 return 0;
1499} 1505}
1500 1506
@@ -1504,8 +1510,7 @@ void igbvf_down(struct igbvf_adapter *adapter)
1504 struct e1000_hw *hw = &adapter->hw; 1510 struct e1000_hw *hw = &adapter->hw;
1505 u32 rxdctl, txdctl; 1511 u32 rxdctl, txdctl;
1506 1512
1507 /* 1513 /* signal that we're down so the interrupt handler does not
1508 * signal that we're down so the interrupt handler does not
1509 * reschedule our watchdog timer 1514 * reschedule our watchdog timer
1510 */ 1515 */
1511 set_bit(__IGBVF_DOWN, &adapter->state); 1516 set_bit(__IGBVF_DOWN, &adapter->state);
@@ -1547,7 +1552,7 @@ void igbvf_reinit_locked(struct igbvf_adapter *adapter)
1547{ 1552{
1548 might_sleep(); 1553 might_sleep();
1549 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) 1554 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
1550 msleep(1); 1555 usleep_range(1000, 2000);
1551 igbvf_down(adapter); 1556 igbvf_down(adapter);
1552 igbvf_up(adapter); 1557 igbvf_up(adapter);
1553 clear_bit(__IGBVF_RESETTING, &adapter->state); 1558 clear_bit(__IGBVF_RESETTING, &adapter->state);
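
The msleep(1) to usleep_range(1000, 2000) conversions in this series follow the kernel's timer guidance: msleep() is jiffy-based, so a 1 ms request can stretch to roughly 20 ms at HZ=100, while usleep_range() is hrtimer-backed and gives the scheduler a coalescing window. The wait loop above, restated with a placeholder bit:

/* Polite spin on a state bit, as in igbvf_reinit_locked() above;
 * bit 0 stands in for __IGBVF_RESETTING.
 */
static void reinit_locked_sketch(unsigned long *state)
{
	while (test_and_set_bit(0, state))
		usleep_range(1000, 2000);	/* ~1-2 ms, hrtimer-backed */
	/* ... down/up sequence would run here ... */
	clear_bit(0, state);
}
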
@@ -1662,8 +1667,7 @@ static int igbvf_open(struct net_device *netdev)
1662 if (err) 1667 if (err)
1663 goto err_setup_rx; 1668 goto err_setup_rx;
1664 1669
1665 /* 1670 /* before we allocate an interrupt, we must be ready to handle it.
1666 * before we allocate an interrupt, we must be ready to handle it.
1667 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 1671 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1668 * as soon as we call pci_request_irq, so we have to setup our 1672 * as soon as we call pci_request_irq, so we have to setup our
1669 * clean_rx handler before we do so. 1673 * clean_rx handler before we do so.
@@ -1725,6 +1729,7 @@ static int igbvf_close(struct net_device *netdev)
1725 1729
1726 return 0; 1730 return 0;
1727} 1731}
1732
1728/** 1733/**
1729 * igbvf_set_mac - Change the Ethernet Address of the NIC 1734 * igbvf_set_mac - Change the Ethernet Address of the NIC
1730 * @netdev: network interface device structure 1735 * @netdev: network interface device structure
@@ -1753,15 +1758,15 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
1753 return 0; 1758 return 0;
1754} 1759}
1755 1760
1756#define UPDATE_VF_COUNTER(reg, name) \ 1761#define UPDATE_VF_COUNTER(reg, name) \
1757 { \ 1762{ \
1758 u32 current_counter = er32(reg); \ 1763 u32 current_counter = er32(reg); \
1759 if (current_counter < adapter->stats.last_##name) \ 1764 if (current_counter < adapter->stats.last_##name) \
1760 adapter->stats.name += 0x100000000LL; \ 1765 adapter->stats.name += 0x100000000LL; \
1761 adapter->stats.last_##name = current_counter; \ 1766 adapter->stats.last_##name = current_counter; \
1762 adapter->stats.name &= 0xFFFFFFFF00000000LL; \ 1767 adapter->stats.name &= 0xFFFFFFFF00000000LL; \
1763 adapter->stats.name |= current_counter; \ 1768 adapter->stats.name |= current_counter; \
1764 } 1769}
1765 1770
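
UPDATE_VF_COUNTER, now braced as a proper statement block, widens free-running 32-bit hardware counters into 64-bit totals. The same rollover logic written out as a plain function:

/* 32-to-64-bit widening performed by UPDATE_VF_COUNTER above: a
 * reading smaller than the previous one means the hardware counter
 * wrapped, so carry 2^32 into the total before splicing in the
 * fresh low 32 bits.
 */
static void vf_counter_update(u64 *stat, u32 *last, u32 current_counter)
{
	if (current_counter < *last)
		*stat += 0x100000000ULL;
	*last = current_counter;
	*stat = (*stat & 0xFFFFFFFF00000000ULL) | current_counter;
}
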
1766/** 1771/**
1767 * igbvf_update_stats - Update the board statistics counters 1772 * igbvf_update_stats - Update the board statistics counters
@@ -1772,8 +1777,7 @@ void igbvf_update_stats(struct igbvf_adapter *adapter)
1772 struct e1000_hw *hw = &adapter->hw; 1777 struct e1000_hw *hw = &adapter->hw;
1773 struct pci_dev *pdev = adapter->pdev; 1778 struct pci_dev *pdev = adapter->pdev;
1774 1779
1775 /* 1780 /* Prevent stats update while adapter is being reset, link is down
1776 * Prevent stats update while adapter is being reset, link is down
1777 * or if the pci connection is down. 1781 * or if the pci connection is down.
1778 */ 1782 */
1779 if (adapter->link_speed == 0) 1783 if (adapter->link_speed == 0)
@@ -1832,7 +1836,7 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter)
1832 **/ 1836 **/
1833static void igbvf_watchdog(unsigned long data) 1837static void igbvf_watchdog(unsigned long data)
1834{ 1838{
1835 struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; 1839 struct igbvf_adapter *adapter = (struct igbvf_adapter *)data;
1836 1840
1837 /* Do the rest outside of interrupt context */ 1841 /* Do the rest outside of interrupt context */
1838 schedule_work(&adapter->watchdog_task); 1842 schedule_work(&adapter->watchdog_task);
@@ -1841,8 +1845,8 @@ static void igbvf_watchdog(unsigned long data)
1841static void igbvf_watchdog_task(struct work_struct *work) 1845static void igbvf_watchdog_task(struct work_struct *work)
1842{ 1846{
1843 struct igbvf_adapter *adapter = container_of(work, 1847 struct igbvf_adapter *adapter = container_of(work,
1844 struct igbvf_adapter, 1848 struct igbvf_adapter,
1845 watchdog_task); 1849 watchdog_task);
1846 struct net_device *netdev = adapter->netdev; 1850 struct net_device *netdev = adapter->netdev;
1847 struct e1000_mac_info *mac = &adapter->hw.mac; 1851 struct e1000_mac_info *mac = &adapter->hw.mac;
1848 struct igbvf_ring *tx_ring = adapter->tx_ring; 1852 struct igbvf_ring *tx_ring = adapter->tx_ring;
@@ -1855,8 +1859,8 @@ static void igbvf_watchdog_task(struct work_struct *work)
1855 if (link) { 1859 if (link) {
1856 if (!netif_carrier_ok(netdev)) { 1860 if (!netif_carrier_ok(netdev)) {
1857 mac->ops.get_link_up_info(&adapter->hw, 1861 mac->ops.get_link_up_info(&adapter->hw,
1858 &adapter->link_speed, 1862 &adapter->link_speed,
1859 &adapter->link_duplex); 1863 &adapter->link_duplex);
1860 igbvf_print_link_info(adapter); 1864 igbvf_print_link_info(adapter);
1861 1865
1862 netif_carrier_on(netdev); 1866 netif_carrier_on(netdev);
@@ -1876,10 +1880,9 @@ static void igbvf_watchdog_task(struct work_struct *work)
1876 igbvf_update_stats(adapter); 1880 igbvf_update_stats(adapter);
1877 } else { 1881 } else {
1878 tx_pending = (igbvf_desc_unused(tx_ring) + 1 < 1882 tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
1879 tx_ring->count); 1883 tx_ring->count);
1880 if (tx_pending) { 1884 if (tx_pending) {
1881 /* 1885 /* We've lost link, so the controller stops DMA,
1882 * We've lost link, so the controller stops DMA,
1883 * but we've got queued Tx work that's never going 1886 * but we've got queued Tx work that's never going
1884 * to get done, so reset controller to flush Tx. 1887 * to get done, so reset controller to flush Tx.
1885 * (Do the reset outside of interrupt context). 1888 * (Do the reset outside of interrupt context).
@@ -1898,15 +1901,15 @@ static void igbvf_watchdog_task(struct work_struct *work)
1898 round_jiffies(jiffies + (2 * HZ))); 1901 round_jiffies(jiffies + (2 * HZ)));
1899} 1902}
1900 1903
1901#define IGBVF_TX_FLAGS_CSUM 0x00000001 1904#define IGBVF_TX_FLAGS_CSUM 0x00000001
1902#define IGBVF_TX_FLAGS_VLAN 0x00000002 1905#define IGBVF_TX_FLAGS_VLAN 0x00000002
1903#define IGBVF_TX_FLAGS_TSO 0x00000004 1906#define IGBVF_TX_FLAGS_TSO 0x00000004
1904#define IGBVF_TX_FLAGS_IPV4 0x00000008 1907#define IGBVF_TX_FLAGS_IPV4 0x00000008
1905#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 1908#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000
1906#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 1909#define IGBVF_TX_FLAGS_VLAN_SHIFT 16
1907 1910
1908static int igbvf_tso(struct igbvf_adapter *adapter, 1911static int igbvf_tso(struct igbvf_adapter *adapter,
1909 struct igbvf_ring *tx_ring, 1912 struct igbvf_ring *tx_ring,
1910 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, 1913 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
1911 __be16 protocol) 1914 __be16 protocol)
1912{ 1915{
@@ -1930,17 +1933,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1930 1933
1931 if (protocol == htons(ETH_P_IP)) { 1934 if (protocol == htons(ETH_P_IP)) {
1932 struct iphdr *iph = ip_hdr(skb); 1935 struct iphdr *iph = ip_hdr(skb);
1936
1933 iph->tot_len = 0; 1937 iph->tot_len = 0;
1934 iph->check = 0; 1938 iph->check = 0;
1935 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 1939 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1936 iph->daddr, 0, 1940 iph->daddr, 0,
1937 IPPROTO_TCP, 1941 IPPROTO_TCP,
1938 0); 1942 0);
1939 } else if (skb_is_gso_v6(skb)) { 1943 } else if (skb_is_gso_v6(skb)) {
1940 ipv6_hdr(skb)->payload_len = 0; 1944 ipv6_hdr(skb)->payload_len = 0;
1941 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 1945 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1942 &ipv6_hdr(skb)->daddr, 1946 &ipv6_hdr(skb)->daddr,
1943 0, IPPROTO_TCP, 0); 1947 0, IPPROTO_TCP, 0);
1944 } 1948 }
1945 1949
1946 i = tx_ring->next_to_use; 1950 i = tx_ring->next_to_use;
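
For context on the reindented checksum calls above: TSO setup zeroes the IP length fields and seeds tcp->check with a pseudo-header sum that deliberately omits the length, since the hardware recomputes it per segment. The IPv4 side as a standalone sketch:

/* Pseudo-header seeding as in the TSO path above; length passed as 0
 * because the device fills in per-segment lengths.
 */
static void tso_seed_ipv4_check(struct iphdr *iph, struct tcphdr *th)
{
	iph->tot_len = 0;
	iph->check = 0;
	th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
				       0, IPPROTO_TCP, 0);
}
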
@@ -1984,7 +1988,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
1984} 1988}
1985 1989
1986static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, 1990static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
1987 struct igbvf_ring *tx_ring, 1991 struct igbvf_ring *tx_ring,
1988 struct sk_buff *skb, u32 tx_flags, 1992 struct sk_buff *skb, u32 tx_flags,
1989 __be16 protocol) 1993 __be16 protocol)
1990{ 1994{
@@ -2005,8 +2009,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
2005 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); 2009 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2006 if (skb->ip_summed == CHECKSUM_PARTIAL) 2010 if (skb->ip_summed == CHECKSUM_PARTIAL)
2007 info |= (skb_transport_header(skb) - 2011 info |= (skb_transport_header(skb) -
2008 skb_network_header(skb)); 2012 skb_network_header(skb));
2009
2010 2013
2011 context_desc->vlan_macip_lens = cpu_to_le32(info); 2014 context_desc->vlan_macip_lens = cpu_to_le32(info);
2012 2015
@@ -2055,6 +2058,10 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
2055 2058
2056 netif_stop_queue(netdev); 2059 netif_stop_queue(netdev);
2057 2060
2061 /* Herbert's original patch had:
2062 * smp_mb__after_netif_stop_queue();
2063 * but since that doesn't exist yet, just open code it.
2064 */
2058 smp_mb(); 2065 smp_mb();
2059 2066
2060 /* We need to check again just in case room has been made available */ 2067 /* We need to check again just in case room has been made available */
@@ -2067,11 +2074,11 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
2067 return 0; 2074 return 0;
2068} 2075}
2069 2076
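
The smp_mb() above, with its newly kept note about Herbert's never-merged smp_mb__after_netif_stop_queue(), pairs with the wake-side test in igbvf_clean_tx_irq(): the stopped state must be globally visible before the free count is re-read, or the cleanup path could miss the stop and leave the queue stalled. Schematically, with desc_unused() standing in for a fresh read of the ring:

/* Stop/re-check sketch; desc_unused() is a placeholder reader. */
static int maybe_stop_tx_sketch(struct net_device *netdev,
				struct igbvf_ring *ring, int needed)
{
	netif_stop_queue(netdev);
	smp_mb();		/* publish the stop before re-checking */

	if (desc_unused(ring) < needed)
		return -EBUSY;	/* cleanup will wake the queue later */

	netif_start_queue(netdev);	/* room appeared between checks */
	return 0;
}
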
2070#define IGBVF_MAX_TXD_PWR 16 2077#define IGBVF_MAX_TXD_PWR 16
2071#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) 2078#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
2072 2079
2073static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, 2080static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2074 struct igbvf_ring *tx_ring, 2081 struct igbvf_ring *tx_ring,
2075 struct sk_buff *skb) 2082 struct sk_buff *skb)
2076{ 2083{
2077 struct igbvf_buffer *buffer_info; 2084 struct igbvf_buffer *buffer_info;
@@ -2093,7 +2100,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2093 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2100 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2094 goto dma_error; 2101 goto dma_error;
2095 2102
2096
2097 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 2103 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2098 const struct skb_frag_struct *frag; 2104 const struct skb_frag_struct *frag;
2099 2105
@@ -2111,7 +2117,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2111 buffer_info->time_stamp = jiffies; 2117 buffer_info->time_stamp = jiffies;
2112 buffer_info->mapped_as_page = true; 2118 buffer_info->mapped_as_page = true;
2113 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, 2119 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
2114 DMA_TO_DEVICE); 2120 DMA_TO_DEVICE);
2115 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2121 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2116 goto dma_error; 2122 goto dma_error;
2117 } 2123 }
@@ -2133,7 +2139,7 @@ dma_error:
2133 2139
2134 /* clear timestamp and dma mappings for remaining portion of packet */ 2140 /* clear timestamp and dma mappings for remaining portion of packet */
2135 while (count--) { 2141 while (count--) {
2136 if (i==0) 2142 if (i == 0)
2137 i += tx_ring->count; 2143 i += tx_ring->count;
2138 i--; 2144 i--;
2139 buffer_info = &tx_ring->buffer_info[i]; 2145 buffer_info = &tx_ring->buffer_info[i];
@@ -2144,10 +2150,10 @@ dma_error:
2144} 2150}
2145 2151
2146static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, 2152static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
2147 struct igbvf_ring *tx_ring, 2153 struct igbvf_ring *tx_ring,
2148 int tx_flags, int count, 2154 int tx_flags, int count,
2149 unsigned int first, u32 paylen, 2155 unsigned int first, u32 paylen,
2150 u8 hdr_len) 2156 u8 hdr_len)
2151{ 2157{
2152 union e1000_adv_tx_desc *tx_desc = NULL; 2158 union e1000_adv_tx_desc *tx_desc = NULL;
2153 struct igbvf_buffer *buffer_info; 2159 struct igbvf_buffer *buffer_info;
@@ -2155,7 +2161,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
2155 unsigned int i; 2161 unsigned int i;
2156 2162
2157 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | 2163 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
2158 E1000_ADVTXD_DCMD_DEXT); 2164 E1000_ADVTXD_DCMD_DEXT);
2159 2165
2160 if (tx_flags & IGBVF_TX_FLAGS_VLAN) 2166 if (tx_flags & IGBVF_TX_FLAGS_VLAN)
2161 cmd_type_len |= E1000_ADVTXD_DCMD_VLE; 2167 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
@@ -2182,7 +2188,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
2182 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); 2188 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
2183 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 2189 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
2184 tx_desc->read.cmd_type_len = 2190 tx_desc->read.cmd_type_len =
2185 cpu_to_le32(cmd_type_len | buffer_info->length); 2191 cpu_to_le32(cmd_type_len | buffer_info->length);
2186 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2192 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2187 i++; 2193 i++;
2188 if (i == tx_ring->count) 2194 if (i == tx_ring->count)
@@ -2193,14 +2199,16 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
2193 /* Force memory writes to complete before letting h/w 2199 /* Force memory writes to complete before letting h/w
2194 * know there are new descriptors to fetch. (Only 2200 * know there are new descriptors to fetch. (Only
2195 * applicable for weak-ordered memory model archs, 2201 * applicable for weak-ordered memory model archs,
2196 * such as IA-64). */ 2202 * such as IA-64).
2203 */
2197 wmb(); 2204 wmb();
2198 2205
2199 tx_ring->buffer_info[first].next_to_watch = tx_desc; 2206 tx_ring->buffer_info[first].next_to_watch = tx_desc;
2200 tx_ring->next_to_use = i; 2207 tx_ring->next_to_use = i;
2201 writel(i, adapter->hw.hw_addr + tx_ring->tail); 2208 writel(i, adapter->hw.hw_addr + tx_ring->tail);
2202 /* we need this if more than one processor can write to our tail 2209 /* we need this if more than one processor can write to our tail
2203 * at a time, it syncronizes IO on IA64/Altix systems */ 2210 * at a time, it synchronizes IO on IA64/Altix systems
2211 */
2204 mmiowb(); 2212 mmiowb();
2205} 2213}
2206 2214
@@ -2225,11 +2233,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2225 return NETDEV_TX_OK; 2233 return NETDEV_TX_OK;
2226 } 2234 }
2227 2235
2228 /* 2236 /* need: count + 4 desc gap to keep tail from touching
2229 * need: count + 4 desc gap to keep tail from touching 2237 * + 2 desc gap to keep tail from touching head,
2230 * + 2 desc gap to keep tail from touching head, 2238 * + 1 desc for skb->data,
2231 * + 1 desc for skb->data, 2239 * + 1 desc for context descriptor,
2232 * + 1 desc for context descriptor,
2233 * head, otherwise try next time 2240 * head, otherwise try next time
2234 */ 2241 */
2235 if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { 2242 if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
@@ -2258,11 +2265,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2258 if (tso) 2265 if (tso)
2259 tx_flags |= IGBVF_TX_FLAGS_TSO; 2266 tx_flags |= IGBVF_TX_FLAGS_TSO;
2260 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && 2267 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
2261 (skb->ip_summed == CHECKSUM_PARTIAL)) 2268 (skb->ip_summed == CHECKSUM_PARTIAL))
2262 tx_flags |= IGBVF_TX_FLAGS_CSUM; 2269 tx_flags |= IGBVF_TX_FLAGS_CSUM;
2263 2270
2264 /* 2271 /* count reflects descriptors mapped, if 0 then mapping error
2265 * count reflects descriptors mapped, if 0 then mapping error
2266 * has occurred and we need to rewind the descriptor queue 2272 * has occurred and we need to rewind the descriptor queue
2267 */ 2273 */
2268 count = igbvf_tx_map_adv(adapter, tx_ring, skb); 2274 count = igbvf_tx_map_adv(adapter, tx_ring, skb);
@@ -2313,6 +2319,7 @@ static void igbvf_tx_timeout(struct net_device *netdev)
2313static void igbvf_reset_task(struct work_struct *work) 2319static void igbvf_reset_task(struct work_struct *work)
2314{ 2320{
2315 struct igbvf_adapter *adapter; 2321 struct igbvf_adapter *adapter;
2322
2316 adapter = container_of(work, struct igbvf_adapter, reset_task); 2323 adapter = container_of(work, struct igbvf_adapter, reset_task);
2317 2324
2318 igbvf_reinit_locked(adapter); 2325 igbvf_reinit_locked(adapter);
@@ -2356,14 +2363,13 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
2356 } 2363 }
2357 2364
2358 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) 2365 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
2359 msleep(1); 2366 usleep_range(1000, 2000);
2360 /* igbvf_down has a dependency on max_frame_size */ 2367 /* igbvf_down has a dependency on max_frame_size */
2361 adapter->max_frame_size = max_frame; 2368 adapter->max_frame_size = max_frame;
2362 if (netif_running(netdev)) 2369 if (netif_running(netdev))
2363 igbvf_down(adapter); 2370 igbvf_down(adapter);
2364 2371
2365 /* 2372 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
2366 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
2367 * means we reserve 2 more, this pushes us to allocate from the next 2373 * means we reserve 2 more, this pushes us to allocate from the next
2368 * larger slab size. 2374 * larger slab size.
2369 * i.e. RXBUFFER_2048 --> size-4096 slab 2375 * i.e. RXBUFFER_2048 --> size-4096 slab
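
The slab note above is plain arithmetic: netdev_alloc_skb() reserves 16 bytes of headroom and NET_IP_ALIGN typically adds 2 more, so a 2048-byte buffer becomes a 2066-byte allocation and lands in the size-4096 slab. In numbers:

/* Worked example of the slab note above (16-byte reserve plus
 * NET_IP_ALIGN == 2 assumed, as the comment says).
 */
static unsigned int rx_skb_alloc_size(unsigned int rx_buffer_len)
{
	return rx_buffer_len + 16 + 2;	/* 2048 -> 2066 -> size-4096 slab */
}
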
@@ -2382,15 +2388,14 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
2382 adapter->rx_buffer_len = PAGE_SIZE / 2; 2388 adapter->rx_buffer_len = PAGE_SIZE / 2;
2383#endif 2389#endif
2384 2390
2385
2386 /* adjust allocation if LPE protects us, and we aren't using SBP */ 2391 /* adjust allocation if LPE protects us, and we aren't using SBP */
2387 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 2392 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
2388 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) 2393 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
2389 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + 2394 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
2390 ETH_FCS_LEN; 2395 ETH_FCS_LEN;
2391 2396
2392 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", 2397 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
2393 netdev->mtu, new_mtu); 2398 netdev->mtu, new_mtu);
2394 netdev->mtu = new_mtu; 2399 netdev->mtu = new_mtu;
2395 2400
2396 if (netif_running(netdev)) 2401 if (netif_running(netdev))
@@ -2477,8 +2482,7 @@ static void igbvf_shutdown(struct pci_dev *pdev)
2477} 2482}
2478 2483
2479#ifdef CONFIG_NET_POLL_CONTROLLER 2484#ifdef CONFIG_NET_POLL_CONTROLLER
2480/* 2485/* Polling 'interrupt' - used by things like netconsole to send skbs
2481 * Polling 'interrupt' - used by things like netconsole to send skbs
2482 * without having to re-enable interrupts. It's not called while 2486 * without having to re-enable interrupts. It's not called while
2483 * the interrupt routine is executing. 2487 * the interrupt routine is executing.
2484 */ 2488 */
@@ -2503,7 +2507,7 @@ static void igbvf_netpoll(struct net_device *netdev)
2503 * this device has been detected. 2507 * this device has been detected.
2504 */ 2508 */
2505static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, 2509static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
2506 pci_channel_state_t state) 2510 pci_channel_state_t state)
2507{ 2511{
2508 struct net_device *netdev = pci_get_drvdata(pdev); 2512 struct net_device *netdev = pci_get_drvdata(pdev);
2509 struct igbvf_adapter *adapter = netdev_priv(netdev); 2513 struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -2583,7 +2587,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
2583} 2587}
2584 2588
2585static int igbvf_set_features(struct net_device *netdev, 2589static int igbvf_set_features(struct net_device *netdev,
2586 netdev_features_t features) 2590 netdev_features_t features)
2587{ 2591{
2588 struct igbvf_adapter *adapter = netdev_priv(netdev); 2592 struct igbvf_adapter *adapter = netdev_priv(netdev);
2589 2593
@@ -2596,21 +2600,21 @@ static int igbvf_set_features(struct net_device *netdev,
2596} 2600}
2597 2601
2598static const struct net_device_ops igbvf_netdev_ops = { 2602static const struct net_device_ops igbvf_netdev_ops = {
2599 .ndo_open = igbvf_open, 2603 .ndo_open = igbvf_open,
2600 .ndo_stop = igbvf_close, 2604 .ndo_stop = igbvf_close,
2601 .ndo_start_xmit = igbvf_xmit_frame, 2605 .ndo_start_xmit = igbvf_xmit_frame,
2602 .ndo_get_stats = igbvf_get_stats, 2606 .ndo_get_stats = igbvf_get_stats,
2603 .ndo_set_rx_mode = igbvf_set_multi, 2607 .ndo_set_rx_mode = igbvf_set_multi,
2604 .ndo_set_mac_address = igbvf_set_mac, 2608 .ndo_set_mac_address = igbvf_set_mac,
2605 .ndo_change_mtu = igbvf_change_mtu, 2609 .ndo_change_mtu = igbvf_change_mtu,
2606 .ndo_do_ioctl = igbvf_ioctl, 2610 .ndo_do_ioctl = igbvf_ioctl,
2607 .ndo_tx_timeout = igbvf_tx_timeout, 2611 .ndo_tx_timeout = igbvf_tx_timeout,
2608 .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, 2612 .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
2609 .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, 2613 .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
2610#ifdef CONFIG_NET_POLL_CONTROLLER 2614#ifdef CONFIG_NET_POLL_CONTROLLER
2611 .ndo_poll_controller = igbvf_netpoll, 2615 .ndo_poll_controller = igbvf_netpoll,
2612#endif 2616#endif
2613 .ndo_set_features = igbvf_set_features, 2617 .ndo_set_features = igbvf_set_features,
2614}; 2618};
2615 2619
2616/** 2620/**
@@ -2645,8 +2649,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2645 } else { 2649 } else {
2646 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 2650 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2647 if (err) { 2651 if (err) {
2648 dev_err(&pdev->dev, "No usable DMA " 2652 dev_err(&pdev->dev,
2649 "configuration, aborting\n"); 2653 "No usable DMA configuration, aborting\n");
2650 goto err_dma; 2654 goto err_dma;
2651 } 2655 }
2652 } 2656 }
@@ -2686,7 +2690,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2686 2690
2687 err = -EIO; 2691 err = -EIO;
2688 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), 2692 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
2689 pci_resource_len(pdev, 0)); 2693 pci_resource_len(pdev, 0));
2690 2694
2691 if (!adapter->hw.hw_addr) 2695 if (!adapter->hw.hw_addr)
2692 goto err_ioremap; 2696 goto err_ioremap;
@@ -2712,16 +2716,16 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2712 adapter->bd_number = cards_found++; 2716 adapter->bd_number = cards_found++;
2713 2717
2714 netdev->hw_features = NETIF_F_SG | 2718 netdev->hw_features = NETIF_F_SG |
2715 NETIF_F_IP_CSUM | 2719 NETIF_F_IP_CSUM |
2716 NETIF_F_IPV6_CSUM | 2720 NETIF_F_IPV6_CSUM |
2717 NETIF_F_TSO | 2721 NETIF_F_TSO |
2718 NETIF_F_TSO6 | 2722 NETIF_F_TSO6 |
2719 NETIF_F_RXCSUM; 2723 NETIF_F_RXCSUM;
2720 2724
2721 netdev->features = netdev->hw_features | 2725 netdev->features = netdev->hw_features |
2722 NETIF_F_HW_VLAN_CTAG_TX | 2726 NETIF_F_HW_VLAN_CTAG_TX |
2723 NETIF_F_HW_VLAN_CTAG_RX | 2727 NETIF_F_HW_VLAN_CTAG_RX |
2724 NETIF_F_HW_VLAN_CTAG_FILTER; 2728 NETIF_F_HW_VLAN_CTAG_FILTER;
2725 2729
2726 if (pci_using_dac) 2730 if (pci_using_dac)
2727 netdev->features |= NETIF_F_HIGHDMA; 2731 netdev->features |= NETIF_F_HIGHDMA;
@@ -2742,7 +2746,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2742 if (err) 2746 if (err)
2743 dev_info(&pdev->dev, "Error reading MAC address.\n"); 2747 dev_info(&pdev->dev, "Error reading MAC address.\n");
2744 else if (is_zero_ether_addr(adapter->hw.mac.addr)) 2748 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2745 dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); 2749 dev_info(&pdev->dev,
2750 "MAC address not assigned by administrator.\n");
2746 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 2751 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2747 netdev->addr_len); 2752 netdev->addr_len);
2748 } 2753 }
@@ -2751,11 +2756,11 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2751 dev_info(&pdev->dev, "Assigning random MAC address.\n"); 2756 dev_info(&pdev->dev, "Assigning random MAC address.\n");
2752 eth_hw_addr_random(netdev); 2757 eth_hw_addr_random(netdev);
2753 memcpy(adapter->hw.mac.addr, netdev->dev_addr, 2758 memcpy(adapter->hw.mac.addr, netdev->dev_addr,
2754 netdev->addr_len); 2759 netdev->addr_len);
2755 } 2760 }
2756 2761
2757 setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, 2762 setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
2758 (unsigned long) adapter); 2763 (unsigned long)adapter);
2759 2764
2760 INIT_WORK(&adapter->reset_task, igbvf_reset_task); 2765 INIT_WORK(&adapter->reset_task, igbvf_reset_task);
2761 INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); 2766 INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
@@ -2818,8 +2823,7 @@ static void igbvf_remove(struct pci_dev *pdev)
2818 struct igbvf_adapter *adapter = netdev_priv(netdev); 2823 struct igbvf_adapter *adapter = netdev_priv(netdev);
2819 struct e1000_hw *hw = &adapter->hw; 2824 struct e1000_hw *hw = &adapter->hw;
2820 2825
2821 /* 2826 /* The watchdog timer may be rescheduled, so explicitly
2822 * The watchdog timer may be rescheduled, so explicitly
2823 * disable it from being rescheduled. 2827 * disable it from being rescheduled.
2824 */ 2828 */
2825 set_bit(__IGBVF_DOWN, &adapter->state); 2829 set_bit(__IGBVF_DOWN, &adapter->state);
@@ -2832,9 +2836,8 @@ static void igbvf_remove(struct pci_dev *pdev)
2832 2836
2833 igbvf_reset_interrupt_capability(adapter); 2837 igbvf_reset_interrupt_capability(adapter);
2834 2838
2835 /* 2839 /* it is important to delete the NAPI struct prior to freeing the
2836 * it is important to delete the napi struct prior to freeing the 2840 * Rx ring so that you do not end up with null pointer refs
2837 * rx ring so that you do not end up with null pointer refs
2838 */ 2841 */
2839 netif_napi_del(&adapter->rx_ring->napi); 2842 netif_napi_del(&adapter->rx_ring->napi);
2840 kfree(adapter->tx_ring); 2843 kfree(adapter->tx_ring);
@@ -2866,17 +2869,17 @@ MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
2866 2869
2867/* PCI Device API Driver */ 2870/* PCI Device API Driver */
2868static struct pci_driver igbvf_driver = { 2871static struct pci_driver igbvf_driver = {
2869 .name = igbvf_driver_name, 2872 .name = igbvf_driver_name,
2870 .id_table = igbvf_pci_tbl, 2873 .id_table = igbvf_pci_tbl,
2871 .probe = igbvf_probe, 2874 .probe = igbvf_probe,
2872 .remove = igbvf_remove, 2875 .remove = igbvf_remove,
2873#ifdef CONFIG_PM 2876#ifdef CONFIG_PM
2874 /* Power Management Hooks */ 2877 /* Power Management Hooks */
2875 .suspend = igbvf_suspend, 2878 .suspend = igbvf_suspend,
2876 .resume = igbvf_resume, 2879 .resume = igbvf_resume,
2877#endif 2880#endif
2878 .shutdown = igbvf_shutdown, 2881 .shutdown = igbvf_shutdown,
2879 .err_handler = &igbvf_err_handler 2882 .err_handler = &igbvf_err_handler
2880}; 2883};
2881 2884
2882/** 2885/**
@@ -2888,6 +2891,7 @@ static struct pci_driver igbvf_driver = {
2888static int __init igbvf_init_module(void) 2891static int __init igbvf_init_module(void)
2889{ 2892{
2890 int ret; 2893 int ret;
2894
2891 pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); 2895 pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
2892 pr_info("%s\n", igbvf_copyright); 2896 pr_info("%s\n", igbvf_copyright);
2893 2897
@@ -2909,7 +2913,6 @@ static void __exit igbvf_exit_module(void)
2909} 2913}
2910module_exit(igbvf_exit_module); 2914module_exit(igbvf_exit_module);
2911 2915
2912
2913MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); 2916MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
2914MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); 2917MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
2915MODULE_LICENSE("GPL"); 2918MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h
index 7dc6341715dc..86a7c120b574 100644
--- a/drivers/net/ethernet/intel/igbvf/regs.h
+++ b/drivers/net/ethernet/intel/igbvf/regs.h
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -28,81 +27,81 @@
 #ifndef _E1000_REGS_H_
 #define _E1000_REGS_H_
 
 #define E1000_CTRL	0x00000 /* Device Control - RW */
 #define E1000_STATUS	0x00008 /* Device Status - RO */
 #define E1000_ITR	0x000C4 /* Interrupt Throttling Rate - RW */
 #define E1000_EICR	0x01580 /* Ext. Interrupt Cause Read - R/clr */
 #define E1000_EITR(_n)	(0x01680 + (0x4 * (_n)))
 #define E1000_EICS	0x01520 /* Ext. Interrupt Cause Set - W0 */
 #define E1000_EIMS	0x01524 /* Ext. Interrupt Mask Set/Read - RW */
 #define E1000_EIMC	0x01528 /* Ext. Interrupt Mask Clear - WO */
 #define E1000_EIAC	0x0152C /* Ext. Interrupt Auto Clear - RW */
 #define E1000_EIAM	0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
 #define E1000_IVAR0	0x01700 /* Interrupt Vector Allocation (array) - RW */
 #define E1000_IVAR_MISC	0x01740 /* IVAR for "other" causes - RW */
-/*
- * Convenience macros
+
+/* Convenience macros
  *
  * Note: "_n" is the queue number of the register to be written to.
  *
  * Example usage:
  * E1000_RDBAL_REG(current_rx_queue)
  */
 #define E1000_RDBAL(_n)	((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
 			 (0x0C000 + ((_n) * 0x40)))
 #define E1000_RDBAH(_n)	((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
 			 (0x0C004 + ((_n) * 0x40)))
 #define E1000_RDLEN(_n)	((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
 			 (0x0C008 + ((_n) * 0x40)))
 #define E1000_SRRCTL(_n)	((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
 			 (0x0C00C + ((_n) * 0x40)))
 #define E1000_RDH(_n)	((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
 			 (0x0C010 + ((_n) * 0x40)))
 #define E1000_RDT(_n)	((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
 			 (0x0C018 + ((_n) * 0x40)))
 #define E1000_RXDCTL(_n)	((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
 			 (0x0C028 + ((_n) * 0x40)))
 #define E1000_TDBAL(_n)	((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
 			 (0x0E000 + ((_n) * 0x40)))
 #define E1000_TDBAH(_n)	((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
 			 (0x0E004 + ((_n) * 0x40)))
 #define E1000_TDLEN(_n)	((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
 			 (0x0E008 + ((_n) * 0x40)))
 #define E1000_TDH(_n)	((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
 			 (0x0E010 + ((_n) * 0x40)))
 #define E1000_TDT(_n)	((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
 			 (0x0E018 + ((_n) * 0x40)))
 #define E1000_TXDCTL(_n)	((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
 			 (0x0E028 + ((_n) * 0x40)))
 #define E1000_DCA_TXCTRL(_n)	(0x03814 + (_n << 8))
 #define E1000_DCA_RXCTRL(_n)	(0x02814 + (_n << 8))
 #define E1000_RAL(_i)	(((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
 			 (0x054E0 + ((_i - 16) * 8)))
 #define E1000_RAH(_i)	(((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
 			 (0x054E4 + ((_i - 16) * 8)))
 
 /* Statistics registers */
 #define E1000_VFGPRC	0x00F10
 #define E1000_VFGORC	0x00F18
 #define E1000_VFMPRC	0x00F3C
 #define E1000_VFGPTC	0x00F14
 #define E1000_VFGOTC	0x00F34
 #define E1000_VFGOTLBC	0x00F50
 #define E1000_VFGPTLBC	0x00F44
 #define E1000_VFGORLBC	0x00F48
 #define E1000_VFGPRLBC	0x00F40
 
 /* These act per VF so an array friendly macro is used */
 #define E1000_V2PMAILBOX(_n)	(0x00C40 + (4 * (_n)))
 #define E1000_VMBMEM(_n)	(0x00800 + (64 * (_n)))
 
 /* Define macros for handling registers */
 #define er32(reg)	readl(hw->hw_addr + E1000_##reg)
 #define ew32(reg, val)	writel((val), hw->hw_addr + E1000_##reg)
 #define array_er32(reg, offset) \
 	readl(hw->hw_addr + E1000_##reg + (offset << 2))
 #define array_ew32(reg, offset, val) \
 	writel((val), hw->hw_addr + E1000_##reg + (offset << 2))
 #define e1e_flush()	er32(STATUS)
 
 #endif
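
The convenience macros in this header encode the two register banks: queues 0-3 sit at the legacy offsets with a 0x100 stride, queue 4 and up at the 0x0C000/0x0E000 banks with a 0x40 stride. A minimal standalone sketch (illustrative only, not part of this patch) of how E1000_RDBAL(_n) resolves:

#include <stdio.h>

/* Same shape as the driver macro: queues 0-3 use the 0x100-stride bank,
 * higher queues fall into the 0x40-stride bank starting at 0x0C000.
 */
#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
			 (0x0C000 + ((_n) * 0x40)))

int main(void)
{
	for (unsigned int q = 0; q < 6; q++)
		printf("RDBAL(%u) = 0x%05X\n", q, (unsigned int)E1000_RDBAL(q));
	/* Prints 0x02800, 0x02900, 0x02A00, 0x02B00, then 0x0C100, 0x0C140 */
	return 0;
}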
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index 955ad8c2c534..a13baa90ae20 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -25,17 +24,16 @@
 
 *******************************************************************************/
 
-
 #include "vf.h"
 
 static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
 static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
 				     u16 *duplex);
 static s32 e1000_init_hw_vf(struct e1000_hw *hw);
 static s32 e1000_reset_hw_vf(struct e1000_hw *hw);
 
 static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *,
 					 u32, u32, u32);
 static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
 static s32 e1000_read_mac_addr_vf(struct e1000_hw *);
 static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool);
@@ -94,7 +92,7 @@ void e1000_init_function_pointers_vf(struct e1000_hw *hw)
  * the status register's data which is often stale and inaccurate.
  **/
 static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
 				     u16 *duplex)
 {
 	s32 status;
 
@@ -130,7 +128,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
 	u8 *addr = (u8 *)(&msgbuf[1]);
 	u32 ctrl;
 
-	/* assert vf queue/interrupt reset */
+	/* assert VF queue/interrupt reset */
 	ctrl = er32(CTRL);
 	ew32(CTRL, ctrl | E1000_CTRL_RST);
 
@@ -144,7 +142,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
 	/* mailbox timeout can now become active */
 	mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
 
-	/* notify pf of vf reset completion */
+	/* notify PF of VF reset completion */
 	msgbuf[0] = E1000_VF_RESET;
 	mbx->ops.write_posted(hw, msgbuf, 1);
 
@@ -153,7 +151,8 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
 	/* set our "perm_addr" based on info provided by PF */
 	ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
 	if (!ret_val) {
-		if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
+		if (msgbuf[0] == (E1000_VF_RESET |
+				  E1000_VT_MSGTYPE_ACK))
 			memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
 		else
 			ret_val = -E1000_ERR_MAC_INIT;
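
The reset handshake in the hunk above ends with the PF mailing back a three-word message: word 0 should carry E1000_VF_RESET or'd with the ACK message-type flag, and the permanent MAC address starts at word 1. A hedged standalone sketch of that unpacking; the opcode and flag values below are placeholders, not taken from this patch:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN		6
#define E1000_VF_RESET		0x00000001u /* placeholder opcode value */
#define E1000_VT_MSGTYPE_ACK	0x80000000u /* placeholder flag value */

int main(void)
{
	uint32_t msgbuf[3] = { E1000_VF_RESET | E1000_VT_MSGTYPE_ACK,
			       0x563412aa, 0x0000bbcc }; /* example payload */
	uint8_t perm_addr[ETH_ALEN];

	/* Mirrors the driver check: only accept the address on an ACK'd reset.
	 * The byte layout of the copy is host-endian, as with the real memcpy.
	 */
	if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
		memcpy(perm_addr, (uint8_t *)&msgbuf[1], ETH_ALEN);
	else
		return 1; /* the driver would return -E1000_ERR_MAC_INIT */

	for (int i = 0; i < ETH_ALEN; i++)
		printf("%02x%c", perm_addr[i], i < ETH_ALEN - 1 ? ':' : '\n');
	return 0;
}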
@@ -194,15 +193,14 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
 	/* Register count multiplied by bits per register */
 	hash_mask = (hw->mac.mta_reg_count * 32) - 1;
 
-	/*
-	 * The bit_shift is the number of left-shifts
+	/* The bit_shift is the number of left-shifts
 	 * where 0xFF would still fall within the hash mask.
 	 */
 	while (hash_mask >> bit_shift != 0xFF)
 		bit_shift++;
 
 	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
-				   (((u16) mc_addr[5]) << bit_shift)));
+				   (((u16)mc_addr[5]) << bit_shift)));
 
 	return hash_value;
 }
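
The multicast hash above is self-contained enough to reproduce outside the driver. A sketch, assuming an mta_reg_count of 128 purely for illustration (the real value comes from hw->mac and is not shown in this patch):

#include <stdio.h>
#include <stdint.h>

/* Standalone re-statement of e1000_hash_mc_addr_vf() above. */
static uint32_t hash_mc_addr(const uint8_t *mc_addr, uint32_t mta_reg_count)
{
	uint32_t hash_mask = (mta_reg_count * 32) - 1;
	unsigned int bit_shift = 0;

	/* find how far 0xFF can shift left and still fit under the mask */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	return hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
			     ((uint16_t)mc_addr[5] << bit_shift)));
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }; /* mDNS */

	/* With mta_reg_count = 128, hash_mask = 0xFFF and bit_shift = 4 */
	printf("hash = 0x%03x\n", hash_mc_addr(mc, 128));
	return 0;
}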
@@ -221,8 +219,8 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
  * unless there are workarounds that change this.
  **/
 static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
 					 u8 *mc_addr_list, u32 mc_addr_count,
 					 u32 rar_used_count, u32 rar_count)
 {
 	struct e1000_mbx_info *mbx = &hw->mbx;
 	u32 msgbuf[E1000_VFMAILBOX_SIZE];
@@ -305,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
  * @addr: pointer to the receive address
  * @index: receive address array register
  **/
-static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
+static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index)
 {
 	struct e1000_mbx_info *mbx = &hw->mbx;
 	u32 msgbuf[3];
@@ -354,8 +352,7 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
 	s32 ret_val = E1000_SUCCESS;
 	u32 in_msg = 0;
 
-	/*
-	 * We only want to run this if there has been a rst asserted.
+	/* We only want to run this if there has been a rst asserted.
 	 * in this case that could mean a link change, device reset,
 	 * or a virtual function reset
 	 */
@@ -367,31 +364,33 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
 	if (!mac->get_link_status)
 		goto out;
 
-	/* if link status is down no point in checking to see if pf is up */
+	/* if link status is down no point in checking to see if PF is up */
 	if (!(er32(STATUS) & E1000_STATUS_LU))
 		goto out;
 
-	/* if the read failed it could just be a mailbox collision, best wait
-	 * until we are called again and don't report an error */
+	/* if the read failed it could just be a mailbox collision, best wait
+	 * until we are called again and don't report an error
+	 */
 	if (mbx->ops.read(hw, &in_msg, 1))
 		goto out;
 
 	/* if incoming message isn't clear to send we are waiting on response */
 	if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
-		/* message is not CTS and is NACK we must have lost CTS status */
+		/* msg is not CTS and is NACK we must have lost CTS status */
 		if (in_msg & E1000_VT_MSGTYPE_NACK)
 			ret_val = -E1000_ERR_MAC_INIT;
 		goto out;
 	}
 
-	/* the pf is talking, if we timed out in the past we reinit */
+	/* the PF is talking, if we timed out in the past we reinit */
 	if (!mbx->timeout) {
 		ret_val = -E1000_ERR_MAC_INIT;
 		goto out;
 	}
 
-	/* if we passed all the tests above then the link is up and we no
-	 * longer need to check for link */
+	/* if we passed all the tests above then the link is up and we no
+	 * longer need to check for link
+	 */
 	mac->get_link_status = false;
 
 out:
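
The link-check path above boils down to a short decision ladder on the incoming mailbox word: no CTS plus NACK means CTS status was lost, no CTS alone means keep waiting, and CTS means the PF is talking. A sketch of just that ladder; the flag values are placeholders and only the control flow mirrors the driver:

#include <stdio.h>
#include <stdint.h>

#define E1000_VT_MSGTYPE_CTS	0x20000000u /* placeholder flag value */
#define E1000_VT_MSGTYPE_NACK	0x40000000u /* placeholder flag value */

/* Returns 0 when the PF message lets link-checking stop, -1 for the
 * lost-CTS (NACK) case, and 1 for "wait until we are called again".
 */
static int classify_pf_msg(uint32_t in_msg)
{
	if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
		/* not clear-to-send: a NACK means we lost CTS status */
		if (in_msg & E1000_VT_MSGTYPE_NACK)
			return -1; /* driver: -E1000_ERR_MAC_INIT */
		return 1; /* still waiting on a response */
	}
	return 0; /* CTS set: the PF is talking */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_pf_msg(E1000_VT_MSGTYPE_CTS),
	       classify_pf_msg(E1000_VT_MSGTYPE_NACK),
	       classify_pf_msg(0));
	return 0;
}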
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
index 57db3c68dfcd..0f1eca639f68 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.h
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -38,30 +37,29 @@
 
 struct e1000_hw;
 
 #define E1000_DEV_ID_82576_VF	0x10CA
 #define E1000_DEV_ID_I350_VF	0x1520
 #define E1000_REVISION_0	0
 #define E1000_REVISION_1	1
 #define E1000_REVISION_2	2
 #define E1000_REVISION_3	3
 #define E1000_REVISION_4	4
 
 #define E1000_FUNC_0	0
 #define E1000_FUNC_1	1
 
-/*
- * Receive Address Register Count
+/* Receive Address Register Count
  * Number of high/low register pairs in the RAR. The RAR (Receive Address
  * Registers) holds the directed and multicast addresses that we monitor.
  * These entries are also used for MAC-based filtering.
  */
 #define E1000_RAR_ENTRIES_VF	1
 
 /* Receive Descriptor - Advanced */
 union e1000_adv_rx_desc {
 	struct {
 		u64 pkt_addr; /* Packet buffer address */
 		u64 hdr_addr; /* Header buffer address */
 	} read;
 	struct {
 		struct {
@@ -69,53 +67,53 @@ union e1000_adv_rx_desc {
 			u32 data;
 			struct {
 				u16 pkt_info; /* RSS/Packet type */
-				u16 hdr_info; /* Split Header,
-					       * hdr buffer length */
+				/* Split Header, hdr buffer length */
+				u16 hdr_info;
 			} hs_rss;
 		} lo_dword;
 		union {
 			u32 rss; /* RSS Hash */
 			struct {
 				u16 ip_id; /* IP id */
 				u16 csum; /* Packet Checksum */
 			} csum_ip;
 		} hi_dword;
 	} lower;
 	struct {
 		u32 status_error; /* ext status/error */
 		u16 length; /* Packet length */
 		u16 vlan; /* VLAN tag */
 	} upper;
 } wb; /* writeback */
 };
 
 #define E1000_RXDADV_HDRBUFLEN_MASK	0x7FE0
 #define E1000_RXDADV_HDRBUFLEN_SHIFT	5
 
 /* Transmit Descriptor - Advanced */
 union e1000_adv_tx_desc {
 	struct {
 		u64 buffer_addr; /* Address of descriptor's data buf */
 		u32 cmd_type_len;
 		u32 olinfo_status;
 	} read;
 	struct {
 		u64 rsvd; /* Reserved */
 		u32 nxtseq_seed;
 		u32 status;
 	} wb;
 };
 
 /* Adv Transmit Descriptor Config Masks */
 #define E1000_ADVTXD_DTYP_CTXT	0x00200000 /* Advanced Context Descriptor */
 #define E1000_ADVTXD_DTYP_DATA	0x00300000 /* Advanced Data Descriptor */
 #define E1000_ADVTXD_DCMD_EOP	0x01000000 /* End of Packet */
 #define E1000_ADVTXD_DCMD_IFCS	0x02000000 /* Insert FCS (Ethernet CRC) */
 #define E1000_ADVTXD_DCMD_RS	0x08000000 /* Report Status */
 #define E1000_ADVTXD_DCMD_DEXT	0x20000000 /* Descriptor extension (1=Adv) */
 #define E1000_ADVTXD_DCMD_VLE	0x40000000 /* VLAN pkt enable */
 #define E1000_ADVTXD_DCMD_TSE	0x80000000 /* TCP Seg enable */
 #define E1000_ADVTXD_PAYLEN_SHIFT	14 /* Adv desc PAYLEN shift */
 
 /* Context descriptors */
 struct e1000_adv_tx_context_desc {
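
The ADVTXD masks above are or'd together into cmd_type_len, and the payload length is shifted into olinfo_status by E1000_ADVTXD_PAYLEN_SHIFT. A hedged host-side sketch of filling those two words for one data descriptor; packing the buffer length into the low bits of cmd_type_len follows the usual igb-family layout and is an assumption here, as is skipping the cpu_to_le32 conversion the driver would do:

#include <stdio.h>
#include <stdint.h>

#define E1000_ADVTXD_DTYP_DATA		0x00300000u /* Advanced Data Descriptor */
#define E1000_ADVTXD_DCMD_EOP		0x01000000u /* End of Packet */
#define E1000_ADVTXD_DCMD_IFCS		0x02000000u /* Insert FCS */
#define E1000_ADVTXD_DCMD_RS		0x08000000u /* Report Status */
#define E1000_ADVTXD_DCMD_DEXT		0x20000000u /* Descriptor extension */
#define E1000_ADVTXD_PAYLEN_SHIFT	14

int main(void)
{
	uint32_t len = 1514; /* example frame length in bytes */

	/* descriptor type, command flags, and (assumed) buffer length */
	uint32_t cmd_type_len = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
				E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_EOP |
				E1000_ADVTXD_DCMD_RS | len;
	uint32_t olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;

	printf("cmd_type_len  = 0x%08x\n", cmd_type_len);
	printf("olinfo_status = 0x%08x\n", olinfo_status);
	return 0;
}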
@@ -125,11 +123,11 @@ struct e1000_adv_tx_context_desc {
 	u32 mss_l4len_idx;
 };
 
 #define E1000_ADVTXD_MACLEN_SHIFT	9 /* Adv ctxt desc mac len shift */
 #define E1000_ADVTXD_TUCMD_IPV4	0x00000400 /* IP Packet Type: 1=IPv4 */
 #define E1000_ADVTXD_TUCMD_L4T_TCP	0x00000800 /* L4 Packet TYPE of TCP */
 #define E1000_ADVTXD_L4LEN_SHIFT	8 /* Adv ctxt L4LEN shift */
 #define E1000_ADVTXD_MSS_SHIFT	16 /* Adv ctxt MSS shift */
 
 enum e1000_mac_type {
 	e1000_undefined = 0,
@@ -262,5 +260,4 @@ struct e1000_hw {
 void e1000_rlpml_set_vf(struct e1000_hw *, u16);
 void e1000_init_function_pointers_vf(struct e1000_hw *hw);
 
-
 #endif /* _E1000_VF_H_ */
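
On the receive side, the HDRBUFLEN mask and shift defined in this header recover the split-header length from the hdr_info word of the writeback descriptor. A tiny sketch (the sample value is made up):

#include <stdio.h>
#include <stdint.h>

#define E1000_RXDADV_HDRBUFLEN_MASK	0x7FE0
#define E1000_RXDADV_HDRBUFLEN_SHIFT	5

int main(void)
{
	uint16_t hdr_info = 0x0C40; /* example writeback hdr_info value */

	/* mask off the length field, then shift it down to a byte count */
	uint16_t hlen = (hdr_info & E1000_RXDADV_HDRBUFLEN_MASK) >>
			E1000_RXDADV_HDRBUFLEN_SHIFT;

	printf("header buffer length = %u bytes\n", hlen); /* 98 here */
	return 0;
}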