author     David S. Miller <davem@davemloft.net>    2014-06-10 23:25:52 -0400
committer  David S. Miller <davem@davemloft.net>    2014-06-10 23:25:52 -0400
commit     27fa589de5d74f4d5a9b8dcab632e7370c8b4fc9 (patch)
tree       715d9c655b189e9ffc6b12b325a2b777c9116ac5 /drivers/net/ethernet
parent     b78370c021c9d52721c7f96fbb3e10f5b2f428d3 (diff)
parent     e8607ef52642b7f4534b099b76c678b76f514a21 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-06-09

This series contains more updates to i40e and i40evf.

Shannon adds checks for error status bits on the admin event queue and provides notification if they are seen. Cleans up an unused variable and a memory allocation that were used earlier in driver development and are no longer needed. Also fixes the driver to not complain about removing non-existent MAC addresses. Bumps the driver versions for both i40e and i40evf.

Catherine fixes a function header comment so that the comment correctly reflects the function name.

Mitch adds code to allow for additional VSIs, since the number of VSIs that the firmware reports to us is a guaranteed minimum, not an absolute maximum; the hardware actually supports far more than the reported value, which we often need. Implements anti-spoofing for VFs for both MAC addresses and VLANs, and enables this feature by default for all VFs.

Anjali changes the interrupt distribution policy to change the way resources for special features are handled. Fixes the driver to not fall back to one queue if the only feature enabled is ATR, since FD_SB and FD_ATR need to be checked independently in order to decide whether we will support multiple queues or not. Allows the RSS table entry range and QPs to be any number, not necessarily a power of 2, because the hardware does not restrict us to a power-of-2 number of QPs for RSS as long as we are not sharing the RSS table with another VSI (VMDq).

Frank modifies the driver to keep SR-IOV enabled when RSS, VMDq, FD_SB and DCB are disabled, so that SR-IOV does not get turned off unnecessarily.

Jesse fixes a bug in receive checksum where the driver was not marking packets with bad checksums correctly, especially IPv6 packets with a bad checksum. To do this correctly, we need a define that may be set by hardware in rare cases.

Greg fixes the driver to delete all the old and stale MAC filters for the VF VSI when the host administrator changes the VF MAC address out from under it.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
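Background for Anjali's RSS change: once the power-of-2 restriction is dropped, the RSS lookup table can simply be filled round-robin across however many queue pairs are in use. The sketch below only illustrates that idea; the table size, helper name and types are assumptions for the example, not the driver's actual register-programming code.

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative LUT size; entries are one byte, so this sketch assumes
     * num_qps <= 256.
     */
    #define RSS_LUT_SIZE 512

    /* Spread lookup-table entries evenly over num_qps queue pairs.
     * Works for any queue count, power of 2 or not.
     */
    void fill_rss_lut(uint8_t lut[RSS_LUT_SIZE], unsigned int num_qps)
    {
        size_t i;

        if (!num_qps)
            return;                 /* nothing to spread across */

        for (i = 0; i < RSS_LUT_SIZE; i++)
            lut[i] = (uint8_t)(i % num_qps);
    }

Filled this way, every queue pair gets either floor(512/num_qps) or ceil(512/num_qps) table slots, so receive traffic still spreads evenly when the queue count is, say, 6 or 12.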
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--   drivers/net/ethernet/intel/i40e/i40e.h               |    3
-rw-r--r--   drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c        |    2
-rw-r--r--   drivers/net/ethernet/intel/i40e/i40e_debugfs.c       |    6
-rw-r--r--   drivers/net/ethernet/intel/i40e/i40e_ethtool.c       |    1
-rw-r--r--   drivers/net/ethernet/intel/i40e/i40e_main.c          |  177
-rw-r--r--   drivers/net/ethernet/intel/i40e/i40e_txrx.c          |   76
-rw-r--r--   drivers/net/ethernet/intel/i40e/i40e_type.h          |    3
-rw-r--r--   drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c   |   65
-rw-r--r--   drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h   |    2
-rw-r--r--   drivers/net/ethernet/intel/i40evf/i40e_txrx.c        |   74
-rw-r--r--   drivers/net/ethernet/intel/i40evf/i40e_type.h        |    3
-rw-r--r--   drivers/net/ethernet/intel/i40evf/i40evf_main.c      |    4
12 files changed, 303 insertions, 113 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index dc6d7c6fb060..2af28fd37bff 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -72,6 +72,7 @@
72#define I40E_MIN_NUM_DESCRIPTORS 64 72#define I40E_MIN_NUM_DESCRIPTORS 64
73#define I40E_MIN_MSIX 2 73#define I40E_MIN_MSIX 2
74#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ 74#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
75#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
75#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */ 76#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
76#define I40E_DEFAULT_QUEUES_PER_VF 4 77#define I40E_DEFAULT_QUEUES_PER_VF 4
77#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ 78#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
@@ -215,6 +216,7 @@ struct i40e_pf {
215 u16 rss_size; /* num queues in the RSS array */ 216 u16 rss_size; /* num queues in the RSS array */
216 u16 rss_size_max; /* HW defined max RSS queues */ 217 u16 rss_size_max; /* HW defined max RSS queues */
217 u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */ 218 u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
219 u16 num_alloc_vsi; /* num VSIs this driver supports */
218 u8 atr_sample_rate; 220 u8 atr_sample_rate;
219 bool wol_en; 221 bool wol_en;
220 222
@@ -295,7 +297,6 @@ struct i40e_pf {
295 u16 pf_seid; 297 u16 pf_seid;
296 u16 main_vsi_seid; 298 u16 main_vsi_seid;
297 u16 mac_seid; 299 u16 mac_seid;
298 struct i40e_aqc_get_switch_config_data *sw_config;
299 struct kobject *switch_kobj; 300 struct kobject *switch_kobj;
300#ifdef CONFIG_DEBUG_FS 301#ifdef CONFIG_DEBUG_FS
301 struct dentry *i40e_dbg_pf; 302 struct dentry *i40e_dbg_pf;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 6e8103abfd0d..871831a535d0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -232,7 +232,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
232 struct i40e_ieee_app_priority_table *app) 232 struct i40e_ieee_app_priority_table *app)
233{ 233{
234 int v, err; 234 int v, err;
235 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 235 for (v = 0; v < pf->num_alloc_vsi; v++) {
236 if (pf->vsi[v] && pf->vsi[v]->netdev) { 236 if (pf->vsi[v] && pf->vsi[v]->netdev) {
237 err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); 237 err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
238 if (err) 238 if (err)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 1bd0adb38735..cffdfc21290f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -45,7 +45,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
45 if (seid < 0) 45 if (seid < 0)
46 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); 46 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
47 else 47 else
48 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 48 for (i = 0; i < pf->num_alloc_vsi; i++)
49 if (pf->vsi[i] && (pf->vsi[i]->seid == seid)) 49 if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
50 return pf->vsi[i]; 50 return pf->vsi[i];
51 51
@@ -843,7 +843,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
843{ 843{
844 int i; 844 int i;
845 845
846 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 846 for (i = 0; i < pf->num_alloc_vsi; i++)
847 if (pf->vsi[i]) 847 if (pf->vsi[i])
848 dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", 848 dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
849 i, pf->vsi[i]->seid); 849 i, pf->vsi[i]->seid);
@@ -1526,7 +1526,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1526 cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid); 1526 cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
1527 if (cnt == 0) { 1527 if (cnt == 0) {
1528 int i; 1528 int i;
1529 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 1529 for (i = 0; i < pf->num_alloc_vsi; i++)
1530 i40e_vsi_reset_stats(pf->vsi[i]); 1530 i40e_vsi_reset_stats(pf->vsi[i]);
1531 dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n"); 1531 dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
1532 } else if (cnt == 1) { 1532 } else if (cnt == 1) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index df3917b68c99..b16c25111552 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -119,6 +119,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
119 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), 119 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
120 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), 120 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
121 I40E_PF_STAT("tx_timeout", tx_timeout_count), 121 I40E_PF_STAT("tx_timeout", tx_timeout_count),
122 I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
122 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors), 123 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
123 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx), 124 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
124 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), 125 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 8c16e185de81..145cb9fc1516 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
39 39
40#define DRV_VERSION_MAJOR 0 40#define DRV_VERSION_MAJOR 0
41#define DRV_VERSION_MINOR 4 41#define DRV_VERSION_MINOR 4
42#define DRV_VERSION_BUILD 5 42#define DRV_VERSION_BUILD 7
43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44 __stringify(DRV_VERSION_MINOR) "." \ 44 __stringify(DRV_VERSION_MINOR) "." \
45 __stringify(DRV_VERSION_BUILD) DRV_KERN 45 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -652,7 +652,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
652 return; 652 return;
653 653
654 /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */ 654 /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
655 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 655 for (v = 0; v < pf->num_alloc_vsi; v++) {
656 struct i40e_vsi *vsi = pf->vsi[v]; 656 struct i40e_vsi *vsi = pf->vsi[v];
657 657
658 if (!vsi || !vsi->tx_rings[0]) 658 if (!vsi || !vsi->tx_rings[0])
@@ -706,7 +706,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
706 } 706 }
707 707
708 /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */ 708 /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
709 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 709 for (v = 0; v < pf->num_alloc_vsi; v++) {
710 struct i40e_vsi *vsi = pf->vsi[v]; 710 struct i40e_vsi *vsi = pf->vsi[v];
711 711
712 if (!vsi || !vsi->tx_rings[0]) 712 if (!vsi || !vsi->tx_rings[0])
@@ -1366,7 +1366,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1366 vsi->tc_config.numtc = numtc; 1366 vsi->tc_config.numtc = numtc;
1367 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; 1367 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1368 /* Number of queues per enabled TC */ 1368 /* Number of queues per enabled TC */
1369 num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc); 1369 num_tc_qps = vsi->alloc_queue_pairs/numtc;
1370 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); 1370 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
1371 1371
1372 /* Setup queue offset/count for all TCs for given VSI */ 1372 /* Setup queue offset/count for all TCs for given VSI */
@@ -1595,7 +1595,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1595 num_del = 0; 1595 num_del = 0;
1596 memset(del_list, 0, sizeof(*del_list)); 1596 memset(del_list, 0, sizeof(*del_list));
1597 1597
1598 if (aq_ret) 1598 if (aq_ret &&
1599 pf->hw.aq.asq_last_status !=
1600 I40E_AQ_RC_ENOENT)
1599 dev_info(&pf->pdev->dev, 1601 dev_info(&pf->pdev->dev,
1600 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", 1602 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1601 aq_ret, 1603 aq_ret,
@@ -1607,7 +1609,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1607 del_list, num_del, NULL); 1609 del_list, num_del, NULL);
1608 num_del = 0; 1610 num_del = 0;
1609 1611
1610 if (aq_ret) 1612 if (aq_ret &&
1613 pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
1611 dev_info(&pf->pdev->dev, 1614 dev_info(&pf->pdev->dev,
1612 "ignoring delete macvlan error, err %d, aq_err %d\n", 1615 "ignoring delete macvlan error, err %d, aq_err %d\n",
1613 aq_ret, pf->hw.aq.asq_last_status); 1616 aq_ret, pf->hw.aq.asq_last_status);
@@ -1734,7 +1737,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1734 return; 1737 return;
1735 pf->flags &= ~I40E_FLAG_FILTER_SYNC; 1738 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1736 1739
1737 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 1740 for (v = 0; v < pf->num_alloc_vsi; v++) {
1738 if (pf->vsi[v] && 1741 if (pf->vsi[v] &&
1739 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) 1742 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1740 i40e_sync_vsi_filters(pf->vsi[v]); 1743 i40e_sync_vsi_filters(pf->vsi[v]);
@@ -3524,7 +3527,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3524 int i; 3527 int i;
3525 3528
3526 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3529 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3527 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 3530 for (i = 0; i < pf->num_alloc_vsi; i++)
3528 if (pf->vsi[i]) 3531 if (pf->vsi[i])
3529 i40e_vsi_free_q_vectors(pf->vsi[i]); 3532 i40e_vsi_free_q_vectors(pf->vsi[i]);
3530 i40e_reset_interrupt_capability(pf); 3533 i40e_reset_interrupt_capability(pf);
@@ -3614,7 +3617,7 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3614{ 3617{
3615 int v; 3618 int v;
3616 3619
3617 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 3620 for (v = 0; v < pf->num_alloc_vsi; v++) {
3618 if (pf->vsi[v]) 3621 if (pf->vsi[v])
3619 i40e_quiesce_vsi(pf->vsi[v]); 3622 i40e_quiesce_vsi(pf->vsi[v]);
3620 } 3623 }
@@ -3628,7 +3631,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3628{ 3631{
3629 int v; 3632 int v;
3630 3633
3631 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 3634 for (v = 0; v < pf->num_alloc_vsi; v++) {
3632 if (pf->vsi[v]) 3635 if (pf->vsi[v])
3633 i40e_unquiesce_vsi(pf->vsi[v]); 3636 i40e_unquiesce_vsi(pf->vsi[v]);
3634 } 3637 }
@@ -4069,7 +4072,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4069 } 4072 }
4070 4073
4071 /* Update each VSI */ 4074 /* Update each VSI */
4072 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4075 for (v = 0; v < pf->num_alloc_vsi; v++) {
4073 if (!pf->vsi[v]) 4076 if (!pf->vsi[v])
4074 continue; 4077 continue;
4075 4078
@@ -4592,7 +4595,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4592 /* Find the VSI(s) that requested a re-init */ 4595 /* Find the VSI(s) that requested a re-init */
4593 dev_info(&pf->pdev->dev, 4596 dev_info(&pf->pdev->dev,
4594 "VSI reinit requested\n"); 4597 "VSI reinit requested\n");
4595 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4598 for (v = 0; v < pf->num_alloc_vsi; v++) {
4596 struct i40e_vsi *vsi = pf->vsi[v]; 4599 struct i40e_vsi *vsi = pf->vsi[v];
4597 if (vsi != NULL && 4600 if (vsi != NULL &&
4598 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { 4601 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
@@ -4919,7 +4922,7 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
4919 i40e_veb_link_event(pf->veb[i], link_up); 4922 i40e_veb_link_event(pf->veb[i], link_up);
4920 4923
4921 /* ... now the local VSIs */ 4924 /* ... now the local VSIs */
4922 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 4925 for (i = 0; i < pf->num_alloc_vsi; i++)
4923 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) 4926 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
4924 i40e_vsi_link_event(pf->vsi[i], link_up); 4927 i40e_vsi_link_event(pf->vsi[i], link_up);
4925} 4928}
@@ -4976,7 +4979,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
4976 * for each q_vector 4979 * for each q_vector
4977 * force an interrupt 4980 * force an interrupt
4978 */ 4981 */
4979 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 4982 for (v = 0; v < pf->num_alloc_vsi; v++) {
4980 struct i40e_vsi *vsi = pf->vsi[v]; 4983 struct i40e_vsi *vsi = pf->vsi[v];
4981 int armed = 0; 4984 int armed = 0;
4982 4985
@@ -5026,7 +5029,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
5026 /* Update the stats for active netdevs so the network stack 5029 /* Update the stats for active netdevs so the network stack
5027 * can look at updated numbers whenever it cares to 5030 * can look at updated numbers whenever it cares to
5028 */ 5031 */
5029 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 5032 for (i = 0; i < pf->num_alloc_vsi; i++)
5030 if (pf->vsi[i] && pf->vsi[i]->netdev) 5033 if (pf->vsi[i] && pf->vsi[i]->netdev)
5031 i40e_update_stats(pf->vsi[i]); 5034 i40e_update_stats(pf->vsi[i]);
5032 5035
@@ -5132,11 +5135,47 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5132 u16 pending, i = 0; 5135 u16 pending, i = 0;
5133 i40e_status ret; 5136 i40e_status ret;
5134 u16 opcode; 5137 u16 opcode;
5138 u32 oldval;
5135 u32 val; 5139 u32 val;
5136 5140
5137 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)) 5141 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
5138 return; 5142 return;
5139 5143
5144 /* check for error indications */
5145 val = rd32(&pf->hw, pf->hw.aq.arq.len);
5146 oldval = val;
5147 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
5148 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
5149 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
5150 }
5151 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
5152 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
5153 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
5154 }
5155 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
5156 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
5157 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
5158 }
5159 if (oldval != val)
5160 wr32(&pf->hw, pf->hw.aq.arq.len, val);
5161
5162 val = rd32(&pf->hw, pf->hw.aq.asq.len);
5163 oldval = val;
5164 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
5165 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
5166 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
5167 }
5168 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
5169 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
5170 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
5171 }
5172 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
5173 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
5174 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
5175 }
5176 if (oldval != val)
5177 wr32(&pf->hw, pf->hw.aq.asq.len, val);
5178
5140 event.msg_size = I40E_MAX_AQ_BUF_SIZE; 5179 event.msg_size = I40E_MAX_AQ_BUF_SIZE;
5141 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); 5180 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
5142 if (!event.msg_buf) 5181 if (!event.msg_buf)
@@ -5242,7 +5281,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
5242 int ret; 5281 int ret;
5243 5282
5244 /* build VSI that owns this VEB, temporarily attached to base VEB */ 5283 /* build VSI that owns this VEB, temporarily attached to base VEB */
5245 for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) { 5284 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
5246 if (pf->vsi[v] && 5285 if (pf->vsi[v] &&
5247 pf->vsi[v]->veb_idx == veb->idx && 5286 pf->vsi[v]->veb_idx == veb->idx &&
5248 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { 5287 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
@@ -5272,7 +5311,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
5272 goto end_reconstitute; 5311 goto end_reconstitute;
5273 5312
5274 /* create the remaining VSIs attached to this VEB */ 5313 /* create the remaining VSIs attached to this VEB */
5275 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 5314 for (v = 0; v < pf->num_alloc_vsi; v++) {
5276 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) 5315 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
5277 continue; 5316 continue;
5278 5317
@@ -5385,7 +5424,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
5385 5424
5386 /* find existing VSI and see if it needs configuring */ 5425 /* find existing VSI and see if it needs configuring */
5387 vsi = NULL; 5426 vsi = NULL;
5388 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 5427 for (i = 0; i < pf->num_alloc_vsi; i++) {
5389 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 5428 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
5390 vsi = pf->vsi[i]; 5429 vsi = pf->vsi[i];
5391 break; 5430 break;
@@ -5415,7 +5454,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
5415 int i; 5454 int i;
5416 5455
5417 i40e_fdir_filter_exit(pf); 5456 i40e_fdir_filter_exit(pf);
5418 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 5457 for (i = 0; i < pf->num_alloc_vsi; i++) {
5419 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 5458 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
5420 i40e_vsi_release(pf->vsi[i]); 5459 i40e_vsi_release(pf->vsi[i]);
5421 break; 5460 break;
@@ -5444,7 +5483,7 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
5444 /* quiesce the VSIs and their queues that are not already DOWN */ 5483 /* quiesce the VSIs and their queues that are not already DOWN */
5445 i40e_pf_quiesce_all_vsi(pf); 5484 i40e_pf_quiesce_all_vsi(pf);
5446 5485
5447 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { 5486 for (v = 0; v < pf->num_alloc_vsi; v++) {
5448 if (pf->vsi[v]) 5487 if (pf->vsi[v])
5449 pf->vsi[v]->seid = 0; 5488 pf->vsi[v]->seid = 0;
5450 } 5489 }
@@ -5924,15 +5963,15 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
5924 * find next empty vsi slot, looping back around if necessary 5963 * find next empty vsi slot, looping back around if necessary
5925 */ 5964 */
5926 i = pf->next_vsi; 5965 i = pf->next_vsi;
5927 while (i < pf->hw.func_caps.num_vsis && pf->vsi[i]) 5966 while (i < pf->num_alloc_vsi && pf->vsi[i])
5928 i++; 5967 i++;
5929 if (i >= pf->hw.func_caps.num_vsis) { 5968 if (i >= pf->num_alloc_vsi) {
5930 i = 0; 5969 i = 0;
5931 while (i < pf->next_vsi && pf->vsi[i]) 5970 while (i < pf->next_vsi && pf->vsi[i])
5932 i++; 5971 i++;
5933 } 5972 }
5934 5973
5935 if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) { 5974 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
5936 vsi_idx = i; /* Found one! */ 5975 vsi_idx = i; /* Found one! */
5937 } else { 5976 } else {
5938 ret = -ENODEV; 5977 ret = -ENODEV;
@@ -6189,6 +6228,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
6189 for (i = 0; i < v_budget; i++) 6228 for (i = 0; i < v_budget; i++)
6190 pf->msix_entries[i].entry = i; 6229 pf->msix_entries[i].entry = i;
6191 vec = i40e_reserve_msix_vectors(pf, v_budget); 6230 vec = i40e_reserve_msix_vectors(pf, v_budget);
6231
6232 if (vec != v_budget) {
6233 /* If we have limited resources, we will start with no vectors
6234 * for the special features and then allocate vectors to some
6235 * of these features based on the policy and at the end disable
6236 * the features that did not get any vectors.
6237 */
6238 pf->num_vmdq_msix = 0;
6239 }
6240
6192 if (vec < I40E_MIN_MSIX) { 6241 if (vec < I40E_MIN_MSIX) {
6193 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; 6242 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
6194 kfree(pf->msix_entries); 6243 kfree(pf->msix_entries);
@@ -6197,27 +6246,25 @@ static int i40e_init_msix(struct i40e_pf *pf)
6197 6246
6198 } else if (vec == I40E_MIN_MSIX) { 6247 } else if (vec == I40E_MIN_MSIX) {
6199 /* Adjust for minimal MSIX use */ 6248 /* Adjust for minimal MSIX use */
6200 dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
6201 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
6202 pf->num_vmdq_vsis = 0; 6249 pf->num_vmdq_vsis = 0;
6203 pf->num_vmdq_qps = 0; 6250 pf->num_vmdq_qps = 0;
6204 pf->num_vmdq_msix = 0;
6205 pf->num_lan_qps = 1; 6251 pf->num_lan_qps = 1;
6206 pf->num_lan_msix = 1; 6252 pf->num_lan_msix = 1;
6207 6253
6208 } else if (vec != v_budget) { 6254 } else if (vec != v_budget) {
6255 /* reserve the misc vector */
6256 vec--;
6257
6209 /* Scale vector usage down */ 6258 /* Scale vector usage down */
6210 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ 6259 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
6211 vec--; /* reserve the misc vector */ 6260 pf->num_vmdq_vsis = 1;
6212 6261
6213 /* partition out the remaining vectors */ 6262 /* partition out the remaining vectors */
6214 switch (vec) { 6263 switch (vec) {
6215 case 2: 6264 case 2:
6216 pf->num_vmdq_vsis = 1;
6217 pf->num_lan_msix = 1; 6265 pf->num_lan_msix = 1;
6218 break; 6266 break;
6219 case 3: 6267 case 3:
6220 pf->num_vmdq_vsis = 1;
6221 pf->num_lan_msix = 2; 6268 pf->num_lan_msix = 2;
6222 break; 6269 break;
6223 default: 6270 default:
@@ -6229,6 +6276,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
6229 } 6276 }
6230 } 6277 }
6231 6278
6279 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
6280 (pf->num_vmdq_msix == 0)) {
6281 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
6282 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
6283 }
6232 return err; 6284 return err;
6233} 6285}
6234 6286
@@ -6446,7 +6498,6 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
6446 return 0; 6498 return 0;
6447 6499
6448 queue_count = min_t(int, queue_count, pf->rss_size_max); 6500 queue_count = min_t(int, queue_count, pf->rss_size_max);
6449 queue_count = rounddown_pow_of_two(queue_count);
6450 6501
6451 if (queue_count != pf->rss_size) { 6502 if (queue_count != pf->rss_size) {
6452 i40e_prep_for_reset(pf); 6503 i40e_prep_for_reset(pf);
@@ -6502,7 +6553,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
6502 if (pf->hw.func_caps.rss) { 6553 if (pf->hw.func_caps.rss) {
6503 pf->flags |= I40E_FLAG_RSS_ENABLED; 6554 pf->flags |= I40E_FLAG_RSS_ENABLED;
6504 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); 6555 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
6505 pf->rss_size = rounddown_pow_of_two(pf->rss_size);
6506 } else { 6556 } else {
6507 pf->rss_size = 1; 6557 pf->rss_size = 1;
6508 } 6558 }
@@ -6848,6 +6898,7 @@ static const struct net_device_ops i40e_netdev_ops = {
6848 .ndo_set_vf_rate = i40e_ndo_set_vf_bw, 6898 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
6849 .ndo_get_vf_config = i40e_ndo_get_vf_config, 6899 .ndo_get_vf_config = i40e_ndo_get_vf_config,
6850 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, 6900 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
6901 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofck,
6851#ifdef CONFIG_I40E_VXLAN 6902#ifdef CONFIG_I40E_VXLAN
6852 .ndo_add_vxlan_port = i40e_add_vxlan_port, 6903 .ndo_add_vxlan_port = i40e_add_vxlan_port,
6853 .ndo_del_vxlan_port = i40e_del_vxlan_port, 6904 .ndo_del_vxlan_port = i40e_del_vxlan_port,
@@ -7082,6 +7133,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
7082 7133
7083 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); 7134 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
7084 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; 7135 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
7136 if (pf->vf[vsi->vf_id].spoofchk) {
7137 ctxt.info.valid_sections |=
7138 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
7139 ctxt.info.sec_flags |=
7140 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
7141 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
7142 }
7085 /* Setup the VSI tx/rx queue map for TC0 only for now */ 7143 /* Setup the VSI tx/rx queue map for TC0 only for now */
7086 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); 7144 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
7087 break; 7145 break;
@@ -7193,7 +7251,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
7193 * the orphan VEBs yet. We'll wait for an explicit remove request 7251 * the orphan VEBs yet. We'll wait for an explicit remove request
7194 * from up the network stack. 7252 * from up the network stack.
7195 */ 7253 */
7196 for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) { 7254 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
7197 if (pf->vsi[i] && 7255 if (pf->vsi[i] &&
7198 pf->vsi[i]->uplink_seid == uplink_seid && 7256 pf->vsi[i]->uplink_seid == uplink_seid &&
7199 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { 7257 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
@@ -7372,7 +7430,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
7372 7430
7373 if (!veb && uplink_seid != pf->mac_seid) { 7431 if (!veb && uplink_seid != pf->mac_seid) {
7374 7432
7375 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 7433 for (i = 0; i < pf->num_alloc_vsi; i++) {
7376 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { 7434 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
7377 vsi = pf->vsi[i]; 7435 vsi = pf->vsi[i];
7378 break; 7436 break;
@@ -7615,7 +7673,7 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
7615 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing 7673 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
7616 * the VEB itself, so don't use (*branch) after this loop. 7674 * the VEB itself, so don't use (*branch) after this loop.
7617 */ 7675 */
7618 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 7676 for (i = 0; i < pf->num_alloc_vsi; i++) {
7619 if (!pf->vsi[i]) 7677 if (!pf->vsi[i])
7620 continue; 7678 continue;
7621 if (pf->vsi[i]->uplink_seid == branch_seid && 7679 if (pf->vsi[i]->uplink_seid == branch_seid &&
@@ -7667,7 +7725,7 @@ void i40e_veb_release(struct i40e_veb *veb)
7667 pf = veb->pf; 7725 pf = veb->pf;
7668 7726
7669 /* find the remaining VSI and check for extras */ 7727 /* find the remaining VSI and check for extras */
7670 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 7728 for (i = 0; i < pf->num_alloc_vsi; i++) {
7671 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { 7729 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
7672 n++; 7730 n++;
7673 vsi = pf->vsi[i]; 7731 vsi = pf->vsi[i];
@@ -7779,10 +7837,10 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
7779 } 7837 }
7780 7838
7781 /* make sure there is such a vsi and uplink */ 7839 /* make sure there is such a vsi and uplink */
7782 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++) 7840 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
7783 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) 7841 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
7784 break; 7842 break;
7785 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) { 7843 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
7786 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", 7844 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
7787 vsi_seid); 7845 vsi_seid);
7788 return NULL; 7846 return NULL;
@@ -7954,15 +8012,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
7954 "header: %d reported %d total\n", 8012 "header: %d reported %d total\n",
7955 num_reported, num_total); 8013 num_reported, num_total);
7956 8014
7957 if (num_reported) {
7958 int sz = sizeof(*sw_config) * num_reported;
7959
7960 kfree(pf->sw_config);
7961 pf->sw_config = kzalloc(sz, GFP_KERNEL);
7962 if (pf->sw_config)
7963 memcpy(pf->sw_config, sw_config, sz);
7964 }
7965
7966 for (i = 0; i < num_reported; i++) { 8015 for (i = 0; i < num_reported; i++) {
7967 struct i40e_aqc_switch_config_element_resp *ele = 8016 struct i40e_aqc_switch_config_element_resp *ele =
7968 &sw_config->element[i]; 8017 &sw_config->element[i];
@@ -8129,9 +8178,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
8129 queues_left = pf->hw.func_caps.num_tx_qp; 8178 queues_left = pf->hw.func_caps.num_tx_qp;
8130 8179
8131 if ((queues_left == 1) || 8180 if ((queues_left == 1) ||
8132 !(pf->flags & I40E_FLAG_MSIX_ENABLED) || 8181 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
8133 !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
8134 I40E_FLAG_DCB_ENABLED))) {
8135 /* one qp for PF, no queues for anything else */ 8182 /* one qp for PF, no queues for anything else */
8136 queues_left = 0; 8183 queues_left = 0;
8137 pf->rss_size = pf->num_lan_qps = 1; 8184 pf->rss_size = pf->num_lan_qps = 1;
@@ -8143,6 +8190,19 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
8143 I40E_FLAG_DCB_ENABLED | 8190 I40E_FLAG_DCB_ENABLED |
8144 I40E_FLAG_SRIOV_ENABLED | 8191 I40E_FLAG_SRIOV_ENABLED |
8145 I40E_FLAG_VMDQ_ENABLED); 8192 I40E_FLAG_VMDQ_ENABLED);
8193 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
8194 I40E_FLAG_FD_SB_ENABLED |
8195 I40E_FLAG_FD_ATR_ENABLED |
8196 I40E_FLAG_DCB_ENABLED))) {
8197 /* one qp for PF */
8198 pf->rss_size = pf->num_lan_qps = 1;
8199 queues_left -= pf->num_lan_qps;
8200
8201 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
8202 I40E_FLAG_FD_SB_ENABLED |
8203 I40E_FLAG_FD_ATR_ENABLED |
8204 I40E_FLAG_DCB_ENABLED |
8205 I40E_FLAG_VMDQ_ENABLED);
8146 } else { 8206 } else {
8147 /* Not enough queues for all TCs */ 8207 /* Not enough queues for all TCs */
8148 if ((pf->flags & I40E_FLAG_DCB_ENABLED) && 8208 if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
@@ -8448,10 +8508,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8448 i40e_determine_queue_usage(pf); 8508 i40e_determine_queue_usage(pf);
8449 i40e_init_interrupt_scheme(pf); 8509 i40e_init_interrupt_scheme(pf);
8450 8510
8451 /* Set up the *vsi struct based on the number of VSIs in the HW, 8511 /* The number of VSIs reported by the FW is the minimum guaranteed
8452 * and set up our local tracking of the MAIN PF vsi. 8512 * to us; HW supports far more and we share the remaining pool with
8513 * the other PFs. We allocate space for more than the guarantee with
8514 * the understanding that we might not get them all later.
8453 */ 8515 */
8454 len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis; 8516 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
8517 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
8518 else
8519 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
8520
8521 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
8522 len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
8455 pf->vsi = kzalloc(len, GFP_KERNEL); 8523 pf->vsi = kzalloc(len, GFP_KERNEL);
8456 if (!pf->vsi) { 8524 if (!pf->vsi) {
8457 err = -ENOMEM; 8525 err = -ENOMEM;
@@ -8464,7 +8532,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8464 goto err_vsis; 8532 goto err_vsis;
8465 } 8533 }
8466 /* if FDIR VSI was set up, start it now */ 8534 /* if FDIR VSI was set up, start it now */
8467 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 8535 for (i = 0; i < pf->num_alloc_vsi; i++) {
8468 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 8536 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
8469 i40e_vsi_open(pf->vsi[i]); 8537 i40e_vsi_open(pf->vsi[i]);
8470 break; 8538 break;
@@ -8659,7 +8727,7 @@ static void i40e_remove(struct pci_dev *pdev)
8659 8727
8660 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ 8728 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
8661 i40e_clear_interrupt_scheme(pf); 8729 i40e_clear_interrupt_scheme(pf);
8662 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 8730 for (i = 0; i < pf->num_alloc_vsi; i++) {
8663 if (pf->vsi[i]) { 8731 if (pf->vsi[i]) {
8664 i40e_vsi_clear_rings(pf->vsi[i]); 8732 i40e_vsi_clear_rings(pf->vsi[i]);
8665 i40e_vsi_clear(pf->vsi[i]); 8733 i40e_vsi_clear(pf->vsi[i]);
@@ -8674,7 +8742,6 @@ static void i40e_remove(struct pci_dev *pdev)
8674 8742
8675 kfree(pf->qp_pile); 8743 kfree(pf->qp_pile);
8676 kfree(pf->irq_pile); 8744 kfree(pf->irq_pile);
8677 kfree(pf->sw_config);
8678 kfree(pf->vsi); 8745 kfree(pf->vsi);
8679 8746
8680 /* force a PF reset to clean anything leftover */ 8747 /* force a PF reset to clean anything leftover */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d1a9a0512b93..d84f4275f470 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -62,7 +62,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
62 62
63 /* find existing FDIR VSI */ 63 /* find existing FDIR VSI */
64 vsi = NULL; 64 vsi = NULL;
65 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 65 for (i = 0; i < pf->num_alloc_vsi; i++)
66 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) 66 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
67 vsi = pf->vsi[i]; 67 vsi = pf->vsi[i];
68 if (!vsi) 68 if (!vsi)
@@ -1193,10 +1193,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1193 u32 rx_error, 1193 u32 rx_error,
1194 u16 rx_ptype) 1194 u16 rx_ptype)
1195{ 1195{
1196 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1197 bool ipv4 = false, ipv6 = false;
1196 bool ipv4_tunnel, ipv6_tunnel; 1198 bool ipv4_tunnel, ipv6_tunnel;
1197 __wsum rx_udp_csum; 1199 __wsum rx_udp_csum;
1198 __sum16 csum;
1199 struct iphdr *iph; 1200 struct iphdr *iph;
1201 __sum16 csum;
1200 1202
1201 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && 1203 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1202 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); 1204 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -1207,29 +1209,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1207 skb->ip_summed = CHECKSUM_NONE; 1209 skb->ip_summed = CHECKSUM_NONE;
1208 1210
1209 /* Rx csum enabled and ip headers found? */ 1211 /* Rx csum enabled and ip headers found? */
1210 if (!(vsi->netdev->features & NETIF_F_RXCSUM && 1212 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1211 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) 1213 return;
1214
1215 /* did the hardware decode the packet and checksum? */
1216 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1217 return;
1218
1219 /* both known and outer_ip must be set for the below code to work */
1220 if (!(decoded.known && decoded.outer_ip))
1212 return; 1221 return;
1213 1222
1223 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1224 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
1225 ipv4 = true;
1226 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1227 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1228 ipv6 = true;
1229
1230 if (ipv4 &&
1231 (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
1232 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1233 goto checksum_fail;
1234
1214 /* likely incorrect csum if alternate IP extension headers found */ 1235 /* likely incorrect csum if alternate IP extension headers found */
1215 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) 1236 if (ipv6 &&
1237 decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
1238 rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
1239 rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1240 /* don't increment checksum err here, non-fatal err */
1216 return; 1241 return;
1217 1242
1218 /* IP or L4 or outmost IP checksum error */ 1243 /* there was some L4 error, count error and punt packet to the stack */
1219 if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | 1244 if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
1220 (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) | 1245 goto checksum_fail;
1221 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) { 1246
1222 vsi->back->hw_csum_rx_error++; 1247 /* handle packets that were not able to be checksummed due
1248 * to arrival speed, in this case the stack can compute
1249 * the csum.
1250 */
1251 if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
1223 return; 1252 return;
1224 }
1225 1253
1254 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1255 * it in the driver, hardware does not do it for us.
1256 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1257 * so the total length of IPv4 header is IHL*4 bytes
1258 * The UDP_0 bit *may* bet set if the *inner* header is UDP
1259 */
1226 if (ipv4_tunnel && 1260 if (ipv4_tunnel &&
1261 (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
1227 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { 1262 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
1228 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1229 * it in the driver, hardware does not do it for us.
1230 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1231 * so the total length of IPv4 header is IHL*4 bytes
1232 */
1233 skb->transport_header = skb->mac_header + 1263 skb->transport_header = skb->mac_header +
1234 sizeof(struct ethhdr) + 1264 sizeof(struct ethhdr) +
1235 (ip_hdr(skb)->ihl * 4); 1265 (ip_hdr(skb)->ihl * 4);
@@ -1246,13 +1276,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1246 (skb->len - skb_transport_offset(skb)), 1276 (skb->len - skb_transport_offset(skb)),
1247 IPPROTO_UDP, rx_udp_csum); 1277 IPPROTO_UDP, rx_udp_csum);
1248 1278
1249 if (udp_hdr(skb)->check != csum) { 1279 if (udp_hdr(skb)->check != csum)
1250 vsi->back->hw_csum_rx_error++; 1280 goto checksum_fail;
1251 return;
1252 }
1253 } 1281 }
1254 1282
1255 skb->ip_summed = CHECKSUM_UNNECESSARY; 1283 skb->ip_summed = CHECKSUM_UNNECESSARY;
1284
1285 return;
1286
1287checksum_fail:
1288 vsi->back->hw_csum_rx_error++;
1256} 1289}
1257 1290
1258/** 1291/**
@@ -1429,6 +1462,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1429 /* ERR_MASK will only have valid bits if EOP set */ 1462 /* ERR_MASK will only have valid bits if EOP set */
1430 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { 1463 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1431 dev_kfree_skb_any(skb); 1464 dev_kfree_skb_any(skb);
1465 /* TODO: shouldn't we increment a counter indicating the
1466 * drop?
1467 */
1432 goto next_desc; 1468 goto next_desc;
1433 } 1469 }
1434 1470
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 6c977d2d48e4..42bfb2aed765 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -541,7 +541,8 @@ enum i40e_rx_desc_error_bits {
541 I40E_RX_DESC_ERROR_IPE_SHIFT = 3, 541 I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
542 I40E_RX_DESC_ERROR_L4E_SHIFT = 4, 542 I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
543 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, 543 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
544 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6 544 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
545 I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
545}; 546};
546 547
547enum i40e_rx_desc_error_l3l4e_fcoe_masks { 548enum i40e_rx_desc_error_l3l4e_fcoe_masks {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4e7634c83685..385a46f910d6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -899,6 +899,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
899 ret = -ENOMEM; 899 ret = -ENOMEM;
900 goto err_alloc; 900 goto err_alloc;
901 } 901 }
902 pf->vf = vfs;
902 903
903 /* apply default profile */ 904 /* apply default profile */
904 for (i = 0; i < num_alloc_vfs; i++) { 905 for (i = 0; i < num_alloc_vfs; i++) {
@@ -908,13 +909,13 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
908 909
909 /* assign default capabilities */ 910 /* assign default capabilities */
910 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); 911 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
912 vfs[i].spoofchk = true;
911 /* vf resources get allocated during reset */ 913 /* vf resources get allocated during reset */
912 i40e_reset_vf(&vfs[i], false); 914 i40e_reset_vf(&vfs[i], false);
913 915
914 /* enable vf vplan_qtable mappings */ 916 /* enable vf vplan_qtable mappings */
915 i40e_enable_vf_mappings(&vfs[i]); 917 i40e_enable_vf_mappings(&vfs[i]);
916 } 918 }
917 pf->vf = vfs;
918 pf->num_alloc_vfs = num_alloc_vfs; 919 pf->num_alloc_vfs = num_alloc_vfs;
919 920
920 i40e_enable_pf_switch_lb(pf); 921 i40e_enable_pf_switch_lb(pf);
@@ -2062,14 +2063,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2062 i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id, 2063 i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
2063 true, false); 2064 true, false);
2064 2065
2065 /* add the new mac address */ 2066 /* Delete all the filters for this VSI - we're going to kill it
2066 f = i40e_add_filter(vsi, mac, vf->port_vlan_id, true, false); 2067 * anyway.
2067 if (!f) { 2068 */
2068 dev_err(&pf->pdev->dev, 2069 list_for_each_entry(f, &vsi->mac_filter_list, list)
2069 "Unable to add VF ucast filter\n"); 2070 i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
2070 ret = -ENOMEM;
2071 goto error_param;
2072 }
2073 2071
2074 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); 2072 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
2075 /* program mac filter */ 2073 /* program mac filter */
@@ -2328,7 +2326,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
2328 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; 2326 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
2329 else 2327 else
2330 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; 2328 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
2331 2329 ivi->spoofchk = vf->spoofchk;
2332 ret = 0; 2330 ret = 0;
2333 2331
2334error_param: 2332error_param:
@@ -2395,3 +2393,50 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2395error_out: 2393error_out:
2396 return ret; 2394 return ret;
2397} 2395}
2396
2397/**
2398 * i40e_ndo_set_vf_spoofchk
2399 * @netdev: network interface device structure
2400 * @vf_id: vf identifier
2401 * @enable: flag to enable or disable feature
2402 *
2403 * Enable or disable VF spoof checking
2404 **/
2405int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable)
2406{
2407 struct i40e_netdev_priv *np = netdev_priv(netdev);
2408 struct i40e_vsi *vsi = np->vsi;
2409 struct i40e_pf *pf = vsi->back;
2410 struct i40e_vsi_context ctxt;
2411 struct i40e_hw *hw = &pf->hw;
2412 struct i40e_vf *vf;
2413 int ret = 0;
2414
2415 /* validate the request */
2416 if (vf_id >= pf->num_alloc_vfs) {
2417 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2418 ret = -EINVAL;
2419 goto out;
2420 }
2421
2422 vf = &(pf->vf[vf_id]);
2423
2424 if (enable == vf->spoofchk)
2425 goto out;
2426
2427 vf->spoofchk = enable;
2428 memset(&ctxt, 0, sizeof(ctxt));
2429 ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
2430 ctxt.pf_num = pf->hw.pf_id;
2431 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
2432 if (enable)
2433 ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
2434 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2435 if (ret) {
2436 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
2437 ret);
2438 ret = -EIO;
2439 }
2440out:
2441 return ret;
2442}
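With the new i40e_ndo_set_vf_spoofck hook wired into i40e_netdev_ops (see the i40e_main.c hunk above), the host administrator toggles the check per VF over the standard rtnetlink path, e.g. "ip link set <pf-netdev> vf 0 spoofchk off"; as the i40e_alloc_vfs() change above shows, the check now defaults to on for every VF.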
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 5a559be4ba2c..63e7e0d81ad2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -101,6 +101,7 @@ struct i40e_vf {
101 unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ 101 unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
102 bool link_forced; 102 bool link_forced;
103 bool link_up; /* only valid if vf link is forced */ 103 bool link_up; /* only valid if vf link is forced */
104 bool spoofchk;
104}; 105};
105 106
106void i40e_free_vfs(struct i40e_pf *pf); 107void i40e_free_vfs(struct i40e_pf *pf);
@@ -121,6 +122,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
121int i40e_ndo_get_vf_config(struct net_device *netdev, 122int i40e_ndo_get_vf_config(struct net_device *netdev,
122 int vf_id, struct ifla_vf_info *ivi); 123 int vf_id, struct ifla_vf_info *ivi);
123int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); 124int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
125int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable);
124 126
125void i40e_vc_notify_link_state(struct i40e_pf *pf); 127void i40e_vc_notify_link_state(struct i40e_pf *pf);
126void i40e_vc_notify_reset(struct i40e_pf *pf); 128void i40e_vc_notify_reset(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ae089df7df19..48ebb6cd69f2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -728,10 +728,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
728 u32 rx_error, 728 u32 rx_error,
729 u16 rx_ptype) 729 u16 rx_ptype)
730{ 730{
731 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
732 bool ipv4 = false, ipv6 = false;
731 bool ipv4_tunnel, ipv6_tunnel; 733 bool ipv4_tunnel, ipv6_tunnel;
732 __wsum rx_udp_csum; 734 __wsum rx_udp_csum;
733 __sum16 csum;
734 struct iphdr *iph; 735 struct iphdr *iph;
736 __sum16 csum;
735 737
736 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && 738 ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
737 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); 739 (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -742,29 +744,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
742 skb->ip_summed = CHECKSUM_NONE; 744 skb->ip_summed = CHECKSUM_NONE;
743 745
744 /* Rx csum enabled and ip headers found? */ 746 /* Rx csum enabled and ip headers found? */
745 if (!(vsi->netdev->features & NETIF_F_RXCSUM && 747 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
746 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) 748 return;
749
750 /* did the hardware decode the packet and checksum? */
751 if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
752 return;
753
754 /* both known and outer_ip must be set for the below code to work */
755 if (!(decoded.known && decoded.outer_ip))
747 return; 756 return;
748 757
758 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
759 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
760 ipv4 = true;
761 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
762 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
763 ipv6 = true;
764
765 if (ipv4 &&
766 (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
767 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
768 goto checksum_fail;
769
749 /* likely incorrect csum if alternate IP extension headers found */ 770 /* likely incorrect csum if alternate IP extension headers found */
750 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) 771 if (ipv6 &&
772 decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
773 rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
774 rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
775 /* don't increment checksum err here, non-fatal err */
751 return; 776 return;
752 777
753 /* IP or L4 or outmost IP checksum error */ 778 /* there was some L4 error, count error and punt packet to the stack */
754 if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | 779 if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
755 (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) | 780 goto checksum_fail;
756 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) { 781
757 vsi->back->hw_csum_rx_error++; 782 /* handle packets that were not able to be checksummed due
783 * to arrival speed, in this case the stack can compute
784 * the csum.
785 */
786 if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
758 return; 787 return;
759 }
760 788
789 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
790 * it in the driver, hardware does not do it for us.
791 * Since L3L4P bit was set we assume a valid IHL value (>=5)
792 * so the total length of IPv4 header is IHL*4 bytes
793 * The UDP_0 bit *may* bet set if the *inner* header is UDP
794 */
761 if (ipv4_tunnel && 795 if (ipv4_tunnel &&
796 (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
762 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { 797 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
763 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
764 * it in the driver, hardware does not do it for us.
765 * Since L3L4P bit was set we assume a valid IHL value (>=5)
766 * so the total length of IPv4 header is IHL*4 bytes
767 */
768 skb->transport_header = skb->mac_header + 798 skb->transport_header = skb->mac_header +
769 sizeof(struct ethhdr) + 799 sizeof(struct ethhdr) +
770 (ip_hdr(skb)->ihl * 4); 800 (ip_hdr(skb)->ihl * 4);
@@ -781,13 +811,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
781 (skb->len - skb_transport_offset(skb)), 811 (skb->len - skb_transport_offset(skb)),
782 IPPROTO_UDP, rx_udp_csum); 812 IPPROTO_UDP, rx_udp_csum);
783 813
784 if (udp_hdr(skb)->check != csum) { 814 if (udp_hdr(skb)->check != csum)
785 vsi->back->hw_csum_rx_error++; 815 goto checksum_fail;
786 return;
787 }
788 } 816 }
789 817
790 skb->ip_summed = CHECKSUM_UNNECESSARY; 818 skb->ip_summed = CHECKSUM_UNNECESSARY;
819
820 return;
821
822checksum_fail:
823 vsi->back->hw_csum_rx_error++;
791} 824}
792 825
793/** 826/**
@@ -956,6 +989,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
956 /* ERR_MASK will only have valid bits if EOP set */ 989 /* ERR_MASK will only have valid bits if EOP set */
957 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { 990 if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
958 dev_kfree_skb_any(skb); 991 dev_kfree_skb_any(skb);
992 /* TODO: shouldn't we increment a counter indicating the
993 * drop?
994 */
959 goto next_desc; 995 goto next_desc;
960 } 996 }
961 997
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index e3c9ebbe7ca2..0a7914d11b6a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -541,7 +541,8 @@ enum i40e_rx_desc_error_bits {
541 I40E_RX_DESC_ERROR_IPE_SHIFT = 3, 541 I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
542 I40E_RX_DESC_ERROR_L4E_SHIFT = 4, 542 I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
543 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, 543 I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
544 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6 544 I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
545 I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
545}; 546};
546 547
547enum i40e_rx_desc_error_l3l4e_fcoe_masks { 548enum i40e_rx_desc_error_l3l4e_fcoe_masks {
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index e8d2481d1849..d4157857360d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
36static const char i40evf_driver_string[] = 36static const char i40evf_driver_string[] =
37 "Intel(R) XL710 X710 Virtual Function Network Driver"; 37 "Intel(R) XL710 X710 Virtual Function Network Driver";
38 38
39#define DRV_VERSION "0.9.29" 39#define DRV_VERSION "0.9.31"
40const char i40evf_driver_version[] = DRV_VERSION; 40const char i40evf_driver_version[] = DRV_VERSION;
41static const char i40evf_copyright[] = 41static const char i40evf_copyright[] =
42 "Copyright (c) 2013 - 2014 Intel Corporation."; 42 "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -1395,7 +1395,7 @@ restart_watchdog:
1395} 1395}
1396 1396
1397/** 1397/**
1398 * i40evf_configure_rss - increment to next available tx queue 1398 * next_queue - increment to next available tx queue
1399 * @adapter: board private structure 1399 * @adapter: board private structure
1400 * @j: queue counter 1400 * @j: queue counter
1401 * 1401 *