author     David S. Miller <davem@davemloft.net>  2015-05-30 21:09:58 -0400
committer  David S. Miller <davem@davemloft.net>  2015-05-30 21:09:58 -0400
commit     8ed9b5e1c8f3cfc0d8c94f1a19d1167422eea7a8 (patch)
tree       d4dbba32d35544015839486921b30a87c97187fc /drivers/net
parent     1ea23a21176e449685a9d0523ab6da83e3779eb1 (diff)
parent     f029094e49814b56fdb3261a694c8890983b7a2d (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-05-28

This series contains updates to ethtool, ixgbe, i40e and i40evf.

John adds helper routines for ethtool to pass a VF via rx_flow_spec. Since the ring_cookie is 64 bits wide, which is much larger than what is needed for actual queue index values, he provides helper routines to pack a VF index into the cookie. John then provides an ixgbe patch to allow flow director to use the entire queue space.

Neerav provides an i40e patch to collect XOFF Rx stats, which were not being collected before.

Anjali provides ATR support for tunneled packets, as well as stats to count tunnel ATR hits. She also cleans up PF struct members that are unnecessary, since the stat index macro can be used directly, and moves the flow director ATR/SB messages to a higher debug level, since they are not useful unless silicon validation is happening.

Greg provides a patch to disable offline diagnostics if VFs are enabled, since the ethtool offline diagnostic tests are not designed (it is out of scope) to disable VF functions for testing and re-enable them afterward. He also cleans up a TODO comment that is no longer needed.

Vasu provides a fix for an FCoE EOF case where i40e_fcoe_ctxt_eof() may be called before i40e_fcoe_eof_is_supported().

Jesse adds skb->xmit_more support for i40evf, then provides a performance enhancement for i40evf by inlining some functions, which gives a 15% gain in small-packet performance. He also removes the use of time_stamp, since it is no longer used to determine whether there is a Tx hang and was part of a previous Tx-hang design that is no longer used.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
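The ring_cookie helpers mentioned above are consumed by the ixgbe hunk at the bottom of this diff (ethtool_get_flow_spec_ring() and ethtool_get_flow_spec_ring_vf()). Below is a minimal sketch of what those helpers look like; the mask names and exact bit layout are an assumption based on this series and should be checked against include/uapi/linux/ethtool.h. The VF field is 1-based, so a value of 0 means the rule targets a PF queue, which is why the ixgbe code below uses (vf - 1) when mapping to an absolute queue index.

/* Sketch of the ring_cookie packing helpers (assumed layout: queue index
 * in the low 32 bits, 1-based VF index in the next byte).
 */
#define ETHTOOL_RX_FLOW_SPEC_RING       0x00000000FFFFFFFFLL
#define ETHTOOL_RX_FLOW_SPEC_RING_VF    0x000000FF00000000LL
#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32

static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
{
        /* low 32 bits: the ring/queue index */
        return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
}

static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
{
        /* next byte: 1-based VF index, 0 means "no VF, PF queue" */
        return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
               ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
}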
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h                 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c        30
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_fcoe.c           11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c           39
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c          144
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h            2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h            1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c     7
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c         158
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h           2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h           1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c       34
12 files changed, 247 insertions, 187 deletions
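For reference, the skb->xmit_more change in the i40evf transmit path follows the usual doorbell-batching idiom: descriptors are still queued for every frame, but the tail register is only written when the stack signals the end of a batch or the subqueue has just been stopped. The sketch below condenses that idiom using the names from the i40evf hunk further down; it is illustrative only, not the full i40evf_tx_map() implementation.

/* Tail-doorbell batching with skb->xmit_more, as done in the i40evf hunk
 * below (descriptor setup and error handling omitted).
 */
static void sketch_tx_doorbell(struct i40e_ring *tx_ring, struct sk_buff *skb,
                               u32 next_to_use)
{
        tx_ring->next_to_use = next_to_use;

        /* Make sure there is room for the next frame; this may stop the
         * subqueue.
         */
        i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);

        /* Only hit the tail register when the stack has no more frames
         * queued for us (xmit_more is false) or the queue was just stopped;
         * otherwise defer the MMIO write so the last frame of the batch
         * rings the doorbell once.
         */
        if (!skb->xmit_more ||
            netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
                                                   tx_ring->queue_index)))
                writel(next_to_use, tx_ring->tail);
}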
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 33c35d3b7420..aca9cef50d81 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -182,6 +182,7 @@ struct i40e_lump_tracking {
 enum i40e_fd_stat_idx {
         I40E_FD_STAT_ATR,
         I40E_FD_STAT_SB,
+        I40E_FD_STAT_ATR_TUNNEL,
         I40E_FD_STAT_PF_COUNT
 };
 #define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
@@ -189,6 +190,8 @@ enum i40e_fd_stat_idx {
         (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
 #define I40E_FD_SB_STAT_IDX(pf_id) \
         (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
+        (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
 
 struct i40e_fdir_filter {
         struct hlist_node fdir_node;
@@ -263,8 +266,6 @@ struct i40e_pf {
 
         struct hlist_head fdir_filter_list;
         u16 fdir_pf_active_filters;
-        u16 fd_sb_cnt_idx;
-        u16 fd_atr_cnt_idx;
         unsigned long fd_flush_timestamp;
         u32 fd_flush_cnt;
         u32 fd_add_err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4cbaaeb902c4..9a68c65b17ea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -147,6 +147,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
         I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
         I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
         I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+        I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
         I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
 
         /* LPI stats */
@@ -1548,6 +1549,17 @@ static int i40e_loopback_test(struct net_device *netdev, u64 *data)
         return *data;
 }
 
+static inline bool i40e_active_vfs(struct i40e_pf *pf)
+{
+        struct i40e_vf *vfs = pf->vf;
+        int i;
+
+        for (i = 0; i < pf->num_alloc_vfs; i++)
+                if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE)
+                        return true;
+        return false;
+}
+
 static void i40e_diag_test(struct net_device *netdev,
                            struct ethtool_test *eth_test, u64 *data)
 {
@@ -1560,6 +1572,20 @@ static void i40e_diag_test(struct net_device *netdev,
                 netif_info(pf, drv, netdev, "offline testing starting\n");
 
                 set_bit(__I40E_TESTING, &pf->state);
+
+                if (i40e_active_vfs(pf)) {
+                        dev_warn(&pf->pdev->dev,
+                                 "Please take active VFS offline and restart the adapter before running NIC diagnostics\n");
+                        data[I40E_ETH_TEST_REG]      = 1;
+                        data[I40E_ETH_TEST_EEPROM]   = 1;
+                        data[I40E_ETH_TEST_INTR]     = 1;
+                        data[I40E_ETH_TEST_LOOPBACK] = 1;
+                        data[I40E_ETH_TEST_LINK]     = 1;
+                        eth_test->flags |= ETH_TEST_FL_FAILED;
+                        clear_bit(__I40E_TESTING, &pf->state);
+                        goto skip_ol_tests;
+                }
+
                 /* If the device is online then take it offline */
                 if (if_running)
                         /* indicate we're in test mode */
@@ -1605,6 +1631,8 @@ static void i40e_diag_test(struct net_device *netdev,
                 data[I40E_ETH_TEST_LOOPBACK] = 0;
         }
 
+skip_ol_tests:
+
         netif_info(pf, drv, netdev, "testing finished\n");
 }
 
@@ -2265,7 +2293,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
         input->pctype = 0;
         input->dest_vsi = vsi->id;
         input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
-        input->cnt_index = pf->fd_sb_cnt_idx;
+        input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
         input->flow_type = fsp->flow_type;
         input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 1803afeef23e..c8b621e0e7cd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -118,7 +118,7 @@ static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
  *
  * The FC EOF is converted to the value understood by HW for descriptor
  * programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
- * first.
+ * first and that already checks for all supported valid eof values.
  **/
 static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
 {
@@ -132,9 +132,12 @@ static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
         case FC_EOF_A:
                 return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
         default:
-                /* FIXME: still returns 0 */
-                pr_err("Unrecognized EOF %x\n", eof);
-                return 0;
+                /* Supported valid eof shall be already checked by
+                 * calling i40e_fcoe_eof_is_supported() first,
+                 * therefore this default case shall never hit.
+                 */
+                WARN_ON(1);
+                return -EINVAL;
         }
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a54c14491e3b..0a3e928a2b00 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 4
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
              __stringify(DRV_VERSION_MINOR) "." \
              __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -772,9 +772,8 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
 
         dcb_cfg = &hw->local_dcbx_config;
 
-        /* See if DCB enabled with PFC TC */
-        if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
-            !(dcb_cfg->pfc.pfcenable)) {
+        /* Collect Link XOFF stats when PFC is disabled */
+        if (!dcb_cfg->pfc.pfcenable) {
                 i40e_update_link_xoff_rx(pf);
                 return;
         }
@@ -1097,12 +1096,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
                            &osd->rx_jabber, &nsd->rx_jabber);
 
         /* FDIR stats */
-        i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+        i40e_stat_update32(hw,
+                      I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
                            pf->stat_offsets_loaded,
                            &osd->fd_atr_match, &nsd->fd_atr_match);
-        i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+        i40e_stat_update32(hw,
+                      I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
                            pf->stat_offsets_loaded,
                            &osd->fd_sb_match, &nsd->fd_sb_match);
+        i40e_stat_update32(hw,
+                      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
+                      pf->stat_offsets_loaded,
+                      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
 
         val = rd32(hw, I40E_PRTPM_EEE_STAT);
         nsd->tx_lpi_status =
@@ -4739,7 +4744,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
                 pf->fd_add_err = pf->fd_atr_cnt = 0;
                 if (pf->fd_tcp_rule > 0) {
                         pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-                        dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                                dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
                         pf->fd_tcp_rule = 0;
                 }
                 i40e_fdir_filter_restore(vsi);
@@ -5428,7 +5434,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
                 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                     (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
                         pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
-                        dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                                dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
                 }
         }
         /* Wait for some more space to be available to turn on ATR */
@@ -5436,7 +5443,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
                 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                     (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
                         pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-                        dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                                dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
                 }
         }
 }
@@ -5469,7 +5477,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
 
         if (!(time_after(jiffies, min_flush_time)) &&
             (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
-                dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+                if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                        dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
                 disable_atr = true;
         }
 
@@ -5496,7 +5505,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
                         if (!disable_atr)
                                 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                         clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
-                        dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                                dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
                 }
         }
 }
@@ -7676,12 +7686,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
             (pf->hw.func_caps.fd_filters_best_effort > 0)) {
                 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-                /* Setup a counter for fd_atr per PF */
-                pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
                 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
                         pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-                        /* Setup a counter for fd_sb per PF */
-                        pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
                 } else {
                         dev_info(&pf->pdev->dev,
                                  "Flow Director Sideband mode Disabled in MFP mode\n");
@@ -7771,7 +7777,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
                 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
                 pf->fdir_pf_active_filters = 0;
                 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-                dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+                if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                        dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
                 /* if ATR was auto disabled it can be re-enabled. */
                 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                     (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0b4a7be2c7d2..cc82a7ffacb0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -165,9 +165,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
         tx_desc->cmd_type_offset_bsz =
             build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 
-        /* set the timestamp */
-        tx_buf->time_stamp = jiffies;
-
         /* Force memory writes to complete before letting h/w
          * know there are new descriptors to fetch.
          */
@@ -283,7 +280,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
         if (add) {
                 pf->fd_tcp_rule++;
                 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
-                        dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                                dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
                         pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                 }
         } else {
@@ -291,7 +289,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                                   (pf->fd_tcp_rule - 1) : 0;
                 if (pf->fd_tcp_rule == 0) {
                         pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-                        dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                                dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
                 }
         }
 
@@ -501,7 +500,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                         if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                             !(pf->auto_disable_flags &
                               I40E_FLAG_FD_SB_ENABLED)) {
-                                dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+                                if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                                        dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
                                 pf->auto_disable_flags |=
                                                         I40E_FLAG_FD_SB_ENABLED;
                         }
@@ -807,10 +807,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                          tx_ring->vsi->seid,
                          tx_ring->queue_index,
                          tx_ring->next_to_use, i);
-                dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-                         "  time_stamp           <%lx>\n"
-                         "  jiffies              <%lx>\n",
-                         tx_ring->tx_bi[i].time_stamp, jiffies);
 
                 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -1653,9 +1649,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                 /* ERR_MASK will only have valid bits if EOP set */
                 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                         dev_kfree_skb_any(skb);
-                        /* TODO: shouldn't we increment a counter indicating the
-                         * drop?
-                         */
                         continue;
                 }
 
@@ -1923,11 +1916,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
  * i40e_atr - Add a Flow Director ATR filter
  * @tx_ring:  ring to add programming descriptor to
  * @skb:      send buffer
- * @flags:    send flags
+ * @tx_flags: send tx flags
  * @protocol: wire protocol
  **/
 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                     u32 flags, __be16 protocol)
+                     u32 tx_flags, __be16 protocol)
 {
         struct i40e_filter_program_desc *fdir_desc;
         struct i40e_pf *pf = tx_ring->vsi->back;
@@ -1952,25 +1945,38 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
         if (!tx_ring->atr_sample_rate)
                 return;
 
-        /* snag network header to get L4 type and address */
-        hdr.network = skb_network_header(skb);
-
-        /* Currently only IPv4/IPv6 with TCP is supported */
-        if (protocol == htons(ETH_P_IP)) {
-                if (hdr.ipv4->protocol != IPPROTO_TCP)
-                        return;
-
-                /* access ihl as a u8 to avoid unaligned access on ia64 */
-                hlen = (hdr.network[0] & 0x0F) << 2;
-        } else if (protocol == htons(ETH_P_IPV6)) {
-                if (hdr.ipv6->nexthdr != IPPROTO_TCP)
-                        return;
-
-                hlen = sizeof(struct ipv6hdr);
-        } else {
-                return;
-        }
-
+        if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
+                return;
+
+        if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
+                /* snag network header to get L4 type and address */
+                hdr.network = skb_network_header(skb);
+
+                /* Currently only IPv4/IPv6 with TCP is supported
+                 * access ihl as u8 to avoid unaligned access on ia64
+                 */
+                if (tx_flags & I40E_TX_FLAGS_IPV4)
+                        hlen = (hdr.network[0] & 0x0F) << 2;
+                else if (protocol == htons(ETH_P_IPV6))
+                        hlen = sizeof(struct ipv6hdr);
+                else
+                        return;
+        } else {
+                hdr.network = skb_inner_network_header(skb);
+                hlen = skb_inner_network_header_len(skb);
+        }
+
+        /* Currently only IPv4/IPv6 with TCP is supported
+         * Note: tx_flags gets modified to reflect inner protocols in
+         * tx_enable_csum function if encap is enabled.
+         */
+        if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
+            (hdr.ipv4->protocol != IPPROTO_TCP))
+                return;
+        else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
+                 (hdr.ipv6->nexthdr != IPPROTO_TCP))
+                return;
+
         th = (struct tcphdr *)(hdr.network + hlen);
 
         /* Due to lack of space, no more new filters can be programmed */
@@ -2020,9 +2026,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
                       I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
 
         dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
-        dtype_cmd |=
-                ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
-                I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+        if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
+                dtype_cmd |=
+                        ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
+                        I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                        I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+        else
+                dtype_cmd |=
+                        ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
+                        I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                        I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
         fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
         fdir_desc->rsvd = cpu_to_le32(0);
@@ -2043,13 +2056,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * otherwise returns 0 to indicate the flags has been set properly.
  **/
 #ifdef I40E_FCOE
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                               struct i40e_ring *tx_ring,
-                               u32 *flags)
-#else
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                       struct i40e_ring *tx_ring,
                                       u32 *flags)
+#else
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                             struct i40e_ring *tx_ring,
+                                             u32 *flags)
 #endif
 {
         __be16 protocol = skb->protocol;
@@ -2117,16 +2130,14 @@ out:
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len:  ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                    u32 tx_flags, __be16 protocol, u8 *hdr_len,
-                    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+                    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+                    u32 *cd_tunneling)
 {
         u32 cd_cmd, cd_tso_len, cd_mss;
         struct ipv6hdr *ipv6h;
@@ -2218,12 +2229,12 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                 u32 *td_cmd, u32 *td_offset,
                                 struct i40e_ring *tx_ring,
                                 u32 *cd_tunneling)
@@ -2239,6 +2250,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                 switch (ip_hdr(skb)->protocol) {
                 case IPPROTO_UDP:
                         l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                        *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                         break;
                 default:
                         return;
@@ -2248,18 +2260,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                 this_ipv6_hdr = inner_ipv6_hdr(skb);
                 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-                if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-                        if (tx_flags & I40E_TX_FLAGS_TSO) {
+                if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+                        if (*tx_flags & I40E_TX_FLAGS_TSO) {
                                 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                                 ip_hdr(skb)->check = 0;
                         } else {
                                 *cd_tunneling |=
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                         }
-                } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+                } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                         *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-                        if (tx_flags & I40E_TX_FLAGS_TSO)
+                        if (*tx_flags & I40E_TX_FLAGS_TSO)
                                 ip_hdr(skb)->check = 0;
                 }
 
@@ -2271,8 +2282,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                          skb_transport_offset(skb)) >> 1) <<
                                    I40E_TXD_CTX_QW0_NATLEN_SHIFT;
                 if (this_ip_hdr->version == 6) {
-                        tx_flags &= ~I40E_TX_FLAGS_IPV4;
-                        tx_flags |= I40E_TX_FLAGS_IPV6;
+                        *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                        *tx_flags |= I40E_TX_FLAGS_IPV6;
                 }
         } else {
                 network_hdr_len = skb_network_header_len(skb);
@@ -2282,12 +2293,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
         }
 
         /* Enable IP checksum offloads */
-        if (tx_flags & I40E_TX_FLAGS_IPV4) {
+        if (*tx_flags & I40E_TX_FLAGS_IPV4) {
                 l4_hdr = this_ip_hdr->protocol;
                 /* the stack computes the IP header already, the only time we
                  * need the hardware to recompute it is in the case of TSO.
                  */
-                if (tx_flags & I40E_TX_FLAGS_TSO) {
+                if (*tx_flags & I40E_TX_FLAGS_TSO) {
                         *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
                         this_ip_hdr->check = 0;
                 } else {
@@ -2296,7 +2307,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                 /* Now set the td_offset for IP header length */
                 *td_offset = (network_hdr_len >> 2) <<
                               I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-        } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+        } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                 l4_hdr = this_ipv6_hdr->nexthdr;
                 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
                 /* Now set the td_offset for IP header length */
@@ -2394,9 +2405,9 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * Returns 0 if stop is not needed
  **/
 #ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #endif
 {
         if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
@@ -2476,13 +2487,13 @@ linearize_chk_done:
  * @td_offset: offset for checksum or crc
  **/
 #ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                 struct i40e_tx_buffer *first, u32 tx_flags,
-                 const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                         struct i40e_tx_buffer *first, u32 tx_flags,
                         const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#else
+static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                               struct i40e_tx_buffer *first, u32 tx_flags,
+                               const u8 hdr_len, u32 td_cmd, u32 td_offset)
 #endif
 {
         unsigned int data_len = skb->data_len;
@@ -2588,9 +2599,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                                                  tx_ring->queue_index),
                              first->bytecount);
 
-        /* set the timestamp */
-        first->time_stamp = jiffies;
-
         /* Force memory writes to complete before letting h/w
          * know there are new descriptors to fetch.  (Only
          * applicable for weak-ordered memory model archs,
@@ -2643,11 +2651,11 @@ dma_error:
  * one descriptor.
  **/
 #ifdef I40E_FCOE
-int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                               struct i40e_ring *tx_ring)
-#else
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
                                       struct i40e_ring *tx_ring)
+#else
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+                                             struct i40e_ring *tx_ring)
 #endif
 {
         unsigned int f;
@@ -2709,7 +2717,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
         else if (protocol == htons(ETH_P_IPV6))
                 tx_flags |= I40E_TX_FLAGS_IPV6;
 
-        tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+        tso = i40e_tso(tx_ring, skb, &hdr_len,
                        &cd_type_cmd_tso_mss, &cd_tunneling);
 
         if (tso < 0)
@@ -2735,7 +2743,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
         if (skb->ip_summed == CHECKSUM_PARTIAL) {
                 tx_flags |= I40E_TX_FLAGS_CSUM;
 
-                i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+                i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                     tx_ring, &cd_tunneling);
         }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4b0b8102cdc3..0dc48dc9ca61 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -139,6 +139,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FSO               (u32)(1 << 7)
 #define I40E_TX_FLAGS_TSYN              (u32)(1 << 8)
 #define I40E_TX_FLAGS_FD_SB             (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL      (u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK         0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK    0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT   29
@@ -146,7 +147,6 @@ enum i40e_dyn_idx_t {
 
 struct i40e_tx_buffer {
         struct i40e_tx_desc *next_to_watch;
-        unsigned long time_stamp;
         union {
                 struct sk_buff *skb;
                 void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 568e855da0f3..9a5a75b1e2bc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1133,6 +1133,7 @@ struct i40e_hw_port_stats {
         /* flow director stats */
         u64 fd_atr_match;
         u64 fd_sb_match;
+        u64 fd_atr_tunnel_match;
         /* EEE LPI */
         u32 tx_lpi_status;
         u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 78d1c4ff565e..4653b6e653c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -980,6 +980,13 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
         int pre_existing_vfs = pci_num_vf(pdev);
         int err = 0;
 
+        if (pf->state & __I40E_TESTING) {
+                dev_warn(&pdev->dev,
+                         "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+                err = -EPERM;
+                goto err_out;
+        }
+
         dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
         if (pre_existing_vfs && pre_existing_vfs != num_vfs)
                 i40e_free_vfs(pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 3ef23091439f..ec7e220757db 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -322,10 +322,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                          tx_ring->vsi->seid,
                          tx_ring->queue_index,
                          tx_ring->next_to_use, i);
-                dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-                         "  time_stamp           <%lx>\n"
-                         "  jiffies              <%lx>\n",
-                         tx_ring->tx_bi[i].time_stamp, jiffies);
 
                 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -1128,9 +1124,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                 /* ERR_MASK will only have valid bits if EOP set */
                 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                         dev_kfree_skb_any(skb);
-                        /* TODO: shouldn't we increment a counter indicating the
-                         * drop?
-                         */
                         continue;
                 }
 
@@ -1350,7 +1343,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 }
 
 /**
- * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
  * @skb:     send buffer
  * @tx_ring: ring to send buffer on
  * @flags:   the tx flags to be set
@@ -1361,9 +1354,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
  * Returns error code indicate the frame should be dropped upon error and the
  * otherwise returns 0 to indicate the flags has been set properly.
  **/
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                                      struct i40e_ring *tx_ring,
-                                      u32 *flags)
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                               struct i40e_ring *tx_ring,
+                                               u32 *flags)
 {
         __be16 protocol = skb->protocol;
         u32  tx_flags = 0;
@@ -1406,16 +1399,14 @@ out:
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len:  ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                    u32 tx_flags, __be16 protocol, u8 *hdr_len,
-                    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+                    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+                    u32 *cd_tunneling)
 {
         u32 cd_cmd, cd_tso_len, cd_mss;
         struct ipv6hdr *ipv6h;
@@ -1466,12 +1457,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                 u32 *td_cmd, u32 *td_offset,
                                 struct i40e_ring *tx_ring,
                                 u32 *cd_tunneling)
@@ -1487,6 +1478,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                 switch (ip_hdr(skb)->protocol) {
                 case IPPROTO_UDP:
                         l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                        *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                         break;
                 default:
                         return;
@@ -1496,18 +1488,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                 this_ipv6_hdr = inner_ipv6_hdr(skb);
                 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-                if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-                        if (tx_flags & I40E_TX_FLAGS_TSO) {
+                if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+                        if (*tx_flags & I40E_TX_FLAGS_TSO) {
                                 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                                 ip_hdr(skb)->check = 0;
                         } else {
                                 *cd_tunneling |=
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                         }
-                } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+                } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                         *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-                        if (tx_flags & I40E_TX_FLAGS_TSO)
+                        if (*tx_flags & I40E_TX_FLAGS_TSO)
                                 ip_hdr(skb)->check = 0;
                 }
 
@@ -1519,8 +1510,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                          skb_transport_offset(skb)) >> 1) <<
                                    I40E_TXD_CTX_QW0_NATLEN_SHIFT;
                 if (this_ip_hdr->version == 6) {
-                        tx_flags &= ~I40E_TX_FLAGS_IPV4;
-                        tx_flags |= I40E_TX_FLAGS_IPV6;
+                        *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                        *tx_flags |= I40E_TX_FLAGS_IPV6;
                 }
 
 
@@ -1532,12 +1523,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
         }
 
         /* Enable IP checksum offloads */
-        if (tx_flags & I40E_TX_FLAGS_IPV4) {
+        if (*tx_flags & I40E_TX_FLAGS_IPV4) {
                 l4_hdr = this_ip_hdr->protocol;
                 /* the stack computes the IP header already, the only time we
                  * need the hardware to recompute it is in the case of TSO.
                  */
-                if (tx_flags & I40E_TX_FLAGS_TSO) {
+                if (*tx_flags & I40E_TX_FLAGS_TSO) {
                         *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
                         this_ip_hdr->check = 0;
                 } else {
@@ -1546,7 +1537,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                 /* Now set the td_offset for IP header length */
                 *td_offset = (network_hdr_len >> 2) <<
                               I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-        } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+        } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                 l4_hdr = this_ipv6_hdr->nexthdr;
                 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
                 /* Now set the td_offset for IP header length */
@@ -1675,7 +1666,44 @@ linearize_chk_done:
 }
 
 /**
- * i40e_tx_map - Build the Tx descriptor
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+        /* Memory barrier before checking head and tail */
+        smp_mb();
+
+        /* Check again in a case another CPU has just made room available. */
+        if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+                return -EBUSY;
+
+        /* A reprieve! - use start_queue because it doesn't call schedule */
+        netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+        ++tx_ring->tx_stats.restart_queue;
+        return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+        if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+                return 0;
+        return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40evf_tx_map - Build the Tx descriptor
  * @tx_ring: ring to send buffer on
  * @skb:     send buffer
  * @first:   first buffer info buffer to use
@@ -1684,9 +1712,9 @@ linearize_chk_done:
  * @td_cmd:   the command field in the descriptor
  * @td_offset: offset for checksum or crc
  **/
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                        struct i40e_tx_buffer *first, u32 tx_flags,
-                        const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                                 struct i40e_tx_buffer *first, u32 tx_flags,
+                                 const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
         unsigned int data_len = skb->data_len;
         unsigned int size = skb_headlen(skb);
@@ -1792,9 +1820,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                                                  tx_ring->queue_index),
                              first->bytecount);
 
-        /* set the timestamp */
-        first->time_stamp = jiffies;
-
         /* Force memory writes to complete before letting h/w
          * know there are new descriptors to fetch.  (Only
          * applicable for weak-ordered memory model archs,
@@ -1811,8 +1836,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
         tx_ring->next_to_use = i;
 
+        i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
         /* notify HW of packet */
-        writel(i, tx_ring->tail);
+        if (!skb->xmit_more ||
+            netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+                                                   tx_ring->queue_index)))
+                writel(i, tx_ring->tail);
 
         return;
 
@@ -1834,44 +1863,7 @@ dma_error:
 }
 
 /**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-        /* Memory barrier before checking head and tail */
-        smp_mb();
-
-        /* Check again in a case another CPU has just made room available. */
-        if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-                return -EBUSY;
-
-        /* A reprieve! - use start_queue because it doesn't call schedule */
-        netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-        ++tx_ring->tx_stats.restart_queue;
-        return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-        if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-                return 0;
-        return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
  * @skb: send buffer
  * @tx_ring: ring to send buffer on
  *
@@ -1879,8 +1871,8 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * there is not enough descriptors available in this ring since we need at least
  * one descriptor.
  **/
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                                      struct i40e_ring *tx_ring)
+static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
+                                               struct i40e_ring *tx_ring)
 {
         unsigned int f;
         int count = 0;
@@ -1895,7 +1887,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
         count += TXD_USE_COUNT(skb_headlen(skb));
-        if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+        if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
                 tx_ring->tx_stats.tx_busy++;
                 return 0;
         }
@@ -1921,11 +1913,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
         u32 td_cmd = 0;
         u8 hdr_len = 0;
         int tso;
-        if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+        if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
                 return NETDEV_TX_BUSY;
 
         /* prepare the xmit flags */
-        if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+        if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
                 goto out_drop;
 
         /* obtain protocol of skb */
@@ -1940,7 +1932,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
         else if (protocol == htons(ETH_P_IPV6))
                 tx_flags |= I40E_TX_FLAGS_IPV6;
 
-        tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+        tso = i40e_tso(tx_ring, skb, &hdr_len,
                        &cd_type_cmd_tso_mss, &cd_tunneling);
 
         if (tso < 0)
@@ -1961,17 +1953,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
         if (skb->ip_summed == CHECKSUM_PARTIAL) {
                 tx_flags |= I40E_TX_FLAGS_CSUM;
 
-                i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+                i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                     tx_ring, &cd_tunneling);
         }
 
         i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                            cd_tunneling, cd_l2tag2);
 
-        i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-                    td_cmd, td_offset);
-
-        i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+        i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+                      td_cmd, td_offset);
 
         return NETDEV_TX_OK;
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 1e49bb1fbac1..e7a34f899f2c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -138,6 +138,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FCCRC             (u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO               (u32)(1 << 7)
 #define I40E_TX_FLAGS_FD_SB             (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL      (u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK         0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK    0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT   29
@@ -145,7 +146,6 @@ enum i40e_dyn_idx_t {
 
 struct i40e_tx_buffer {
         struct i40e_tx_desc *next_to_watch;
-        unsigned long time_stamp;
         union {
                 struct sk_buff *skb;
                 void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index ec9d83a93379..c463ec41579c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1108,6 +1108,7 @@ struct i40e_hw_port_stats {
         /* flow director stats */
         u64 fd_atr_match;
         u64 fd_sb_match;
+        u64 fd_atr_tunnel_match;
         /* EEE LPI */
         u32 tx_lpi_status;
         u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9f6fb19062a0..9a1d0f142b09 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2594,18 +2594,35 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
         struct ixgbe_hw *hw = &adapter->hw;
         struct ixgbe_fdir_filter *input;
         union ixgbe_atr_input mask;
+        u8 queue;
         int err;
 
         if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
                 return -EOPNOTSUPP;
 
-        /*
-         * Don't allow programming if the action is a queue greater than
-         * the number of online Rx queues.
+        /* ring_cookie is a masked into a set of queues and ixgbe pools or
+         * we use the drop index.
          */
-        if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
-            (fsp->ring_cookie >= adapter->num_rx_queues))
-                return -EINVAL;
+        if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+                queue = IXGBE_FDIR_DROP_QUEUE;
+        } else {
+                u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+                u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+                if (!vf && (ring >= adapter->num_rx_queues))
+                        return -EINVAL;
+                else if (vf &&
+                         ((vf > adapter->num_vfs) ||
+                           ring >= adapter->num_rx_queues_per_pool))
+                        return -EINVAL;
+
+                /* Map the ring onto the absolute queue index */
+                if (!vf)
+                        queue = adapter->rx_ring[ring]->reg_idx;
+                else
+                        queue = ((vf - 1) *
+                                adapter->num_rx_queues_per_pool) + ring;
+        }
 
         /* Don't allow indexes to exist outside of available space */
         if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
@@ -2683,10 +2700,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 
         /* program filters to filter memory */
         err = ixgbe_fdir_write_perfect_filter_82599(hw,
-                        &input->filter, input->sw_idx,
-                        (input->action == IXGBE_FDIR_DROP_QUEUE) ?
-                        IXGBE_FDIR_DROP_QUEUE :
-                        adapter->rx_ring[input->action]->reg_idx);
+                        &input->filter, input->sw_idx, queue);
 
         if (err)
                 goto err_out_w_lock;
 