aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2013-10-22 15:52:06 -0400
committerDavid S. Miller <davem@davemloft.net>2013-10-22 15:52:06 -0400
commit666d10085924d9dd4bcab7653f5d4971d8ea2498 (patch)
tree4f09b99a667451907c1dc19a0ae62e22a9c12a0d /drivers
parent0a6957e7d47096bbeedda4e1d926359eb487dcfc (diff)
parent1de046b97908dfca3aa3042dd72e3f3205c186c4 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says: ==================== Intel Wired LAN Driver Updates This series contains updates to i40e only. Jesse provides 6 patches against i40e. First is a patch to reduce CPU utilization by reducing read-flush to read in the hot path. Next couple of patches resolve Coverity issues reported by Hannes Frederic Sowa <hannes@stressinduktion.org>. Then Jesse refactored i40e to cleanup functions which used cpu_to_xxx(foo) which caused a lot of line wrapping. Mitch provides 2 i40e patches. First fixes a panic when tx_rings[0] are not allocated, his second patch corrects a math error when assigning MSI-X vectors to VFs. The vectors-per-vf value reported by the hardware already conveniently reports one less than the actual value. Shannon provides 5 patches against i40e. His first patch corrects a number of little bugs in the error handling of irq setup, most of which ended up panicking the kernel. Next he fixes the overactive IRQ issue seen in testing and allows the use of the legacy interrupt. Shannon then provides a cleanup of the arguments declared at the beginning of each function. Then he provides a patch to make sure that there are really rings and queues before trying to dump information in them. Lastly he simplifies the code by using an already existing variable. Catherine provides an i40e patch to bump the version. v2: - Remove unneeded parenthesis in patch 3 based on feedback from Sergei Shtylyov <sergei.shtylyov@cogentembedded.com> - Fix patch description for patch 11 based on feedback from Sergei Shtylyov <sergei.shtylyov@cogentembedded.com> ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c135
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c50
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c83
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c4
5 files changed, 146 insertions, 127 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index c06a76ca9aaa..49572dcdba87 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -545,6 +545,7 @@ static inline void i40e_dbg_init(void) {}
545static inline void i40e_dbg_exit(void) {} 545static inline void i40e_dbg_exit(void) {}
546#endif /* CONFIG_DEBUG_FS*/ 546#endif /* CONFIG_DEBUG_FS*/
547void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector); 547void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
548void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
548int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 549int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
549void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); 550void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
550int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); 551int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 19e248ff6c77..ef4cb1cf31f2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -151,9 +151,7 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
151 size_t count, loff_t *ppos) 151 size_t count, loff_t *ppos)
152{ 152{
153 struct i40e_pf *pf = filp->private_data; 153 struct i40e_pf *pf = filp->private_data;
154 char dump_request_buf[16];
155 bool seid_found = false; 154 bool seid_found = false;
156 int bytes_not_copied;
157 long seid = -1; 155 long seid = -1;
158 int buflen = 0; 156 int buflen = 0;
159 int i, ret; 157 int i, ret;
@@ -163,21 +161,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
163 /* don't allow partial writes */ 161 /* don't allow partial writes */
164 if (*ppos != 0) 162 if (*ppos != 0)
165 return 0; 163 return 0;
166 if (count >= sizeof(dump_request_buf))
167 return -ENOSPC;
168
169 bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
170 if (bytes_not_copied < 0)
171 return bytes_not_copied;
172 if (bytes_not_copied > 0)
173 count -= bytes_not_copied;
174 dump_request_buf[count] = '\0';
175 164
176 /* decode the SEID given to be dumped */ 165 /* decode the SEID given to be dumped */
177 ret = kstrtol(dump_request_buf, 0, &seid); 166 ret = kstrtol_from_user(buffer, count, 0, &seid);
178 if (ret < 0) { 167
179 dev_info(&pf->pdev->dev, "bad seid value '%s'\n", 168 if (ret) {
180 dump_request_buf); 169 dev_info(&pf->pdev->dev, "bad seid value\n");
181 } else if (seid == 0) { 170 } else if (seid == 0) {
182 seid_found = true; 171 seid_found = true;
183 172
@@ -245,26 +234,33 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
245 memcpy(p, vsi, len); 234 memcpy(p, vsi, len);
246 p += len; 235 p += len;
247 236
248 len = (sizeof(struct i40e_q_vector) 237 if (vsi->num_q_vectors) {
249 * vsi->num_q_vectors); 238 len = (sizeof(struct i40e_q_vector)
250 memcpy(p, vsi->q_vectors, len); 239 * vsi->num_q_vectors);
251 p += len; 240 memcpy(p, vsi->q_vectors, len);
252 241 p += len;
253 len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs); 242 }
254 memcpy(p, vsi->tx_rings, len);
255 p += len;
256 memcpy(p, vsi->rx_rings, len);
257 p += len;
258 243
259 for (i = 0; i < vsi->num_queue_pairs; i++) { 244 if (vsi->num_queue_pairs) {
260 len = sizeof(struct i40e_tx_buffer); 245 len = (sizeof(struct i40e_ring) *
261 memcpy(p, vsi->tx_rings[i]->tx_bi, len); 246 vsi->num_queue_pairs);
247 memcpy(p, vsi->tx_rings, len);
248 p += len;
249 memcpy(p, vsi->rx_rings, len);
262 p += len; 250 p += len;
263 } 251 }
264 for (i = 0; i < vsi->num_queue_pairs; i++) { 252
253 if (vsi->tx_rings[0]) {
254 len = sizeof(struct i40e_tx_buffer);
255 for (i = 0; i < vsi->num_queue_pairs; i++) {
256 memcpy(p, vsi->tx_rings[i]->tx_bi, len);
257 p += len;
258 }
265 len = sizeof(struct i40e_rx_buffer); 259 len = sizeof(struct i40e_rx_buffer);
266 memcpy(p, vsi->rx_rings[i]->rx_bi, len); 260 for (i = 0; i < vsi->num_queue_pairs; i++) {
267 p += len; 261 memcpy(p, vsi->rx_rings[i]->rx_bi, len);
262 p += len;
263 }
268 } 264 }
269 265
270 /* macvlan filter list */ 266 /* macvlan filter list */
@@ -1023,11 +1019,11 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1023 size_t count, loff_t *ppos) 1019 size_t count, loff_t *ppos)
1024{ 1020{
1025 struct i40e_pf *pf = filp->private_data; 1021 struct i40e_pf *pf = filp->private_data;
1022 char *cmd_buf, *cmd_buf_tmp;
1026 int bytes_not_copied; 1023 int bytes_not_copied;
1027 struct i40e_vsi *vsi; 1024 struct i40e_vsi *vsi;
1028 u8 *print_buf_start; 1025 u8 *print_buf_start;
1029 u8 *print_buf; 1026 u8 *print_buf;
1030 char *cmd_buf;
1031 int vsi_seid; 1027 int vsi_seid;
1032 int veb_seid; 1028 int veb_seid;
1033 int cnt; 1029 int cnt;
@@ -1046,6 +1042,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1046 count -= bytes_not_copied; 1042 count -= bytes_not_copied;
1047 cmd_buf[count] = '\0'; 1043 cmd_buf[count] = '\0';
1048 1044
1045 cmd_buf_tmp = strchr(cmd_buf, '\n');
1046 if (cmd_buf_tmp) {
1047 *cmd_buf_tmp = '\0';
1048 count = cmd_buf_tmp - cmd_buf + 1;
1049 }
1050
1049 print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL); 1051 print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
1050 if (!print_buf_start) 1052 if (!print_buf_start)
1051 goto command_write_done; 1053 goto command_write_done;
@@ -1152,9 +1154,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1152 i40e_veb_release(pf->veb[i]); 1154 i40e_veb_release(pf->veb[i]);
1153 1155
1154 } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) { 1156 } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
1155 u8 ma[6];
1156 int vlan = 0;
1157 struct i40e_mac_filter *f; 1157 struct i40e_mac_filter *f;
1158 int vlan = 0;
1159 u8 ma[6];
1158 int ret; 1160 int ret;
1159 1161
1160 cnt = sscanf(&cmd_buf[11], 1162 cnt = sscanf(&cmd_buf[11],
@@ -1190,8 +1192,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1190 ma, vlan, vsi_seid, f, ret); 1192 ma, vlan, vsi_seid, f, ret);
1191 1193
1192 } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) { 1194 } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
1193 u8 ma[6];
1194 int vlan = 0; 1195 int vlan = 0;
1196 u8 ma[6];
1195 int ret; 1197 int ret;
1196 1198
1197 cnt = sscanf(&cmd_buf[11], 1199 cnt = sscanf(&cmd_buf[11],
@@ -1227,9 +1229,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1227 ma, vlan, vsi_seid, ret); 1229 ma, vlan, vsi_seid, ret);
1228 1230
1229 } else if (strncmp(cmd_buf, "add pvid", 8) == 0) { 1231 } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
1230 int v;
1231 u16 vid;
1232 i40e_status ret; 1232 i40e_status ret;
1233 u16 vid;
1234 int v;
1233 1235
1234 cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v); 1236 cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
1235 if (cnt != 2) { 1237 if (cnt != 2) {
@@ -1540,10 +1542,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1540 } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) || 1542 } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
1541 (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) { 1543 (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
1542 struct i40e_fdir_data fd_data; 1544 struct i40e_fdir_data fd_data;
1543 int ret;
1544 u16 packet_len, i, j = 0; 1545 u16 packet_len, i, j = 0;
1545 char *asc_packet; 1546 char *asc_packet;
1546 bool add = false; 1547 bool add = false;
1548 int ret;
1547 1549
1548 asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 1550 asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
1549 GFP_KERNEL); 1551 GFP_KERNEL);
@@ -1631,9 +1633,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1631 } 1633 }
1632 } else if (strncmp(&cmd_buf[5], 1634 } else if (strncmp(&cmd_buf[5],
1633 "get local", 9) == 0) { 1635 "get local", 9) == 0) {
1636 u16 llen, rlen;
1634 int ret, i; 1637 int ret, i;
1635 u8 *buff; 1638 u8 *buff;
1636 u16 llen, rlen;
1637 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); 1639 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1638 if (!buff) 1640 if (!buff)
1639 goto command_write_done; 1641 goto command_write_done;
@@ -1664,9 +1666,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1664 kfree(buff); 1666 kfree(buff);
1665 buff = NULL; 1667 buff = NULL;
1666 } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) { 1668 } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
1669 u16 llen, rlen;
1667 int ret, i; 1670 int ret, i;
1668 u8 *buff; 1671 u8 *buff;
1669 u16 llen, rlen;
1670 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); 1672 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1671 if (!buff) 1673 if (!buff)
1672 goto command_write_done; 1674 goto command_write_done;
@@ -1742,11 +1744,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1742 goto command_write_done; 1744 goto command_write_done;
1743 } 1745 }
1744 1746
1745 /* Read at least 512 words */ 1747 /* set the max length */
1746 if (buffer_len == 0) 1748 buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
1747 buffer_len = 512;
1748 1749
1749 bytes = 2 * buffer_len; 1750 bytes = 2 * buffer_len;
1751
1752 /* read at least 1k bytes, no more than 4kB */
1753 bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
1750 buff = kzalloc(bytes, GFP_KERNEL); 1754 buff = kzalloc(bytes, GFP_KERNEL);
1751 if (!buff) 1755 if (!buff)
1752 goto command_write_done; 1756 goto command_write_done;
@@ -1898,6 +1902,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
1898 struct i40e_pf *pf = filp->private_data; 1902 struct i40e_pf *pf = filp->private_data;
1899 int bytes_not_copied; 1903 int bytes_not_copied;
1900 struct i40e_vsi *vsi; 1904 struct i40e_vsi *vsi;
1905 char *buf_tmp;
1901 int vsi_seid; 1906 int vsi_seid;
1902 int i, cnt; 1907 int i, cnt;
1903 1908
@@ -1916,6 +1921,12 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
1916 count -= bytes_not_copied; 1921 count -= bytes_not_copied;
1917 i40e_dbg_netdev_ops_buf[count] = '\0'; 1922 i40e_dbg_netdev_ops_buf[count] = '\0';
1918 1923
1924 buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
1925 if (buf_tmp) {
1926 *buf_tmp = '\0';
1927 count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
1928 }
1929
1919 if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { 1930 if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
1920 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid); 1931 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
1921 if (cnt != 1) { 1932 if (cnt != 1) {
@@ -2019,21 +2030,35 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
2019 **/ 2030 **/
2020void i40e_dbg_pf_init(struct i40e_pf *pf) 2031void i40e_dbg_pf_init(struct i40e_pf *pf)
2021{ 2032{
2022 struct dentry *pfile __attribute__((unused)); 2033 struct dentry *pfile;
2023 const char *name = pci_name(pf->pdev); 2034 const char *name = pci_name(pf->pdev);
2035 const struct device *dev = &pf->pdev->dev;
2024 2036
2025 pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); 2037 pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
2026 if (pf->i40e_dbg_pf) { 2038 if (!pf->i40e_dbg_pf)
2027 pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, 2039 return;
2028 pf, &i40e_dbg_command_fops); 2040
2029 pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, 2041 pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
2030 &i40e_dbg_dump_fops); 2042 &i40e_dbg_command_fops);
2031 pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, 2043 if (!pfile)
2032 pf, &i40e_dbg_netdev_ops_fops); 2044 goto create_failed;
2033 } else { 2045
2034 dev_info(&pf->pdev->dev, 2046 pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
2035 "debugfs entry for %s failed\n", name); 2047 &i40e_dbg_dump_fops);
2036 } 2048 if (!pfile)
2049 goto create_failed;
2050
2051 pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
2052 &i40e_dbg_netdev_ops_fops);
2053 if (!pfile)
2054 goto create_failed;
2055
2056 return;
2057
2058create_failed:
2059 dev_info(dev, "debugfs dir/file for %s failed\n", name);
2060 debugfs_remove_recursive(pf->i40e_dbg_pf);
2061 return;
2037} 2062}
2038 2063
2039/** 2064/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index fbe7fe2914a9..41a79df373d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,7 +36,7 @@ static const char i40e_driver_string[] =
36 36
37#define DRV_VERSION_MAJOR 0 37#define DRV_VERSION_MAJOR 0
38#define DRV_VERSION_MINOR 3 38#define DRV_VERSION_MINOR 3
39#define DRV_VERSION_BUILD 10 39#define DRV_VERSION_BUILD 11
40#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 40#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
41 __stringify(DRV_VERSION_MINOR) "." \ 41 __stringify(DRV_VERSION_MINOR) "." \
42 __stringify(DRV_VERSION_BUILD) DRV_KERN 42 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -2174,8 +2174,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
2174 2174
2175 /* Now associate this queue with this PCI function */ 2175 /* Now associate this queue with this PCI function */
2176 qtx_ctl = I40E_QTX_CTL_PF_QUEUE; 2176 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2177 qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT) 2177 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2178 & I40E_QTX_CTL_PF_INDX_MASK); 2178 I40E_QTX_CTL_PF_INDX_MASK);
2179 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); 2179 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2180 i40e_flush(hw); 2180 i40e_flush(hw);
2181 2181
@@ -2532,7 +2532,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2532 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 2532 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2533 * @pf: board private structure 2533 * @pf: board private structure
2534 **/ 2534 **/
2535static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) 2535void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2536{ 2536{
2537 struct i40e_hw *hw = &pf->hw; 2537 struct i40e_hw *hw = &pf->hw;
2538 u32 val; 2538 u32 val;
@@ -2560,7 +2560,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2560 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 2560 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2561 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 2561 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2562 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); 2562 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2563 i40e_flush(hw); 2563 /* skip the flush */
2564} 2564}
2565 2565
2566/** 2566/**
@@ -2709,6 +2709,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
2709 i40e_irq_dynamic_enable_icr0(pf); 2709 i40e_irq_dynamic_enable_icr0(pf);
2710 } 2710 }
2711 2711
2712 i40e_flush(&pf->hw);
2712 return 0; 2713 return 0;
2713} 2714}
2714 2715
@@ -2741,14 +2742,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
2741 2742
2742 icr0 = rd32(hw, I40E_PFINT_ICR0); 2743 icr0 = rd32(hw, I40E_PFINT_ICR0);
2743 2744
2744 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
2745 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
2746 return IRQ_NONE;
2747
2748 val = rd32(hw, I40E_PFINT_DYN_CTL0); 2745 val = rd32(hw, I40E_PFINT_DYN_CTL0);
2749 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; 2746 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
2750 wr32(hw, I40E_PFINT_DYN_CTL0, val); 2747 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2751 2748
2749 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
2750 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
2751 return IRQ_NONE;
2752
2752 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); 2753 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
2753 2754
2754 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ 2755 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -2762,7 +2763,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
2762 qval = rd32(hw, I40E_QINT_TQCTL(0)); 2763 qval = rd32(hw, I40E_QINT_TQCTL(0));
2763 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; 2764 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
2764 wr32(hw, I40E_QINT_TQCTL(0), qval); 2765 wr32(hw, I40E_QINT_TQCTL(0), qval);
2765 i40e_flush(hw);
2766 2766
2767 if (!test_bit(__I40E_DOWN, &pf->state)) 2767 if (!test_bit(__I40E_DOWN, &pf->state))
2768 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); 2768 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
@@ -2824,7 +2824,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
2824 2824
2825 /* re-enable interrupt causes */ 2825 /* re-enable interrupt causes */
2826 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 2826 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
2827 i40e_flush(hw);
2828 if (!test_bit(__I40E_DOWN, &pf->state)) { 2827 if (!test_bit(__I40E_DOWN, &pf->state)) {
2829 i40e_service_event_schedule(pf); 2828 i40e_service_event_schedule(pf);
2830 i40e_irq_dynamic_enable_icr0(pf); 2829 i40e_irq_dynamic_enable_icr0(pf);
@@ -4614,7 +4613,8 @@ static void i40e_fdir_setup(struct i40e_pf *pf)
4614 bool new_vsi = false; 4613 bool new_vsi = false;
4615 int err, i; 4614 int err, i;
4616 4615
4617 if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED))) 4616 if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
4617 I40E_FLAG_FDIR_ATR_ENABLED)))
4618 return; 4618 return;
4619 4619
4620 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 4620 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
@@ -5159,11 +5159,12 @@ static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
5159{ 5159{
5160 int i; 5160 int i;
5161 5161
5162 for (i = 0; i < vsi->alloc_queue_pairs; i++) { 5162 if (vsi->tx_rings[0])
5163 kfree_rcu(vsi->tx_rings[i], rcu); 5163 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5164 vsi->tx_rings[i] = NULL; 5164 kfree_rcu(vsi->tx_rings[i], rcu);
5165 vsi->rx_rings[i] = NULL; 5165 vsi->tx_rings[i] = NULL;
5166 } 5166 vsi->rx_rings[i] = NULL;
5167 }
5167 5168
5168 return 0; 5169 return 0;
5169} 5170}
@@ -5433,7 +5434,8 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
5433 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { 5434 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
5434 err = i40e_init_msix(pf); 5435 err = i40e_init_msix(pf);
5435 if (err) { 5436 if (err) {
5436 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | 5437 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
5438 I40E_FLAG_RSS_ENABLED |
5437 I40E_FLAG_MQ_ENABLED | 5439 I40E_FLAG_MQ_ENABLED |
5438 I40E_FLAG_DCB_ENABLED | 5440 I40E_FLAG_DCB_ENABLED |
5439 I40E_FLAG_SRIOV_ENABLED | 5441 I40E_FLAG_SRIOV_ENABLED |
@@ -5448,14 +5450,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
5448 5450
5449 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 5451 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
5450 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 5452 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
5453 dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
5451 err = pci_enable_msi(pf->pdev); 5454 err = pci_enable_msi(pf->pdev);
5452 if (err) { 5455 if (err) {
5453 dev_info(&pf->pdev->dev, 5456 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
5454 "MSI init failed (%d), trying legacy.\n", err);
5455 pf->flags &= ~I40E_FLAG_MSI_ENABLED; 5457 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
5456 } 5458 }
5457 } 5459 }
5458 5460
5461 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
5462 dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
5463
5459 /* track first vector for misc interrupts */ 5464 /* track first vector for misc interrupts */
5460 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); 5465 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
5461} 5466}
@@ -6108,8 +6113,9 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
6108 goto vector_setup_out; 6113 goto vector_setup_out;
6109 } 6114 }
6110 6115
6111 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, 6116 if (vsi->num_q_vectors)
6112 vsi->num_q_vectors, vsi->idx); 6117 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
6118 vsi->num_q_vectors, vsi->idx);
6113 if (vsi->base_vector < 0) { 6119 if (vsi->base_vector < 0) {
6114 dev_info(&pf->pdev->dev, 6120 dev_info(&pf->pdev->dev,
6115 "failed to get q tracking for VSI %d, err=%d\n", 6121 "failed to get q tracking for VSI %d, err=%d\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index dc89e72fd0f4..f1f03bc5c729 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -37,6 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
37 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); 37 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
38} 38}
39 39
40#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
40/** 41/**
41 * i40e_program_fdir_filter - Program a Flow Director filter 42 * i40e_program_fdir_filter - Program a Flow Director filter
42 * @fdir_input: Packet data that will be filter parameters 43 * @fdir_input: Packet data that will be filter parameters
@@ -50,6 +51,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
50 struct i40e_tx_buffer *tx_buf; 51 struct i40e_tx_buffer *tx_buf;
51 struct i40e_tx_desc *tx_desc; 52 struct i40e_tx_desc *tx_desc;
52 struct i40e_ring *tx_ring; 53 struct i40e_ring *tx_ring;
54 unsigned int fpt, dcc;
53 struct i40e_vsi *vsi; 55 struct i40e_vsi *vsi;
54 struct device *dev; 56 struct device *dev;
55 dma_addr_t dma; 57 dma_addr_t dma;
@@ -68,7 +70,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
68 dev = tx_ring->dev; 70 dev = tx_ring->dev;
69 71
70 dma = dma_map_single(dev, fdir_data->raw_packet, 72 dma = dma_map_single(dev, fdir_data->raw_packet,
71 I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE); 73 I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
72 if (dma_mapping_error(dev, dma)) 74 if (dma_mapping_error(dev, dma))
73 goto dma_fail; 75 goto dma_fail;
74 76
@@ -77,74 +79,61 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
77 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); 79 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
78 tx_buf = &tx_ring->tx_bi[i]; 80 tx_buf = &tx_ring->tx_bi[i];
79 81
80 i++; 82 tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
81 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
82 83
83 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index 84 fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
84 << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) 85 I40E_TXD_FLTR_QW0_QINDEX_MASK;
85 & I40E_TXD_FLTR_QW0_QINDEX_MASK);
86 86
87 fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off 87 fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
88 << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) 88 I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
89 & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
90 89
91 fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype 90 fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
92 << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) 91 I40E_TXD_FLTR_QW0_PCTYPE_MASK;
93 & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
94 92
95 /* Use LAN VSI Id if not programmed by user */ 93 /* Use LAN VSI Id if not programmed by user */
96 if (fdir_data->dest_vsi == 0) 94 if (fdir_data->dest_vsi == 0)
97 fdir_desc->qindex_flex_ptype_vsi |= 95 fpt |= (pf->vsi[pf->lan_vsi]->id) <<
98 cpu_to_le32((pf->vsi[pf->lan_vsi]->id) 96 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
99 << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
100 else 97 else
101 fdir_desc->qindex_flex_ptype_vsi |= 98 fpt |= ((u32)fdir_data->dest_vsi <<
102 cpu_to_le32((fdir_data->dest_vsi 99 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
103 << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) 100 I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
104 & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
105 101
106 fdir_desc->dtype_cmd_cntindex = 102 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
107 cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG); 103
104 dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
108 105
109 if (add) 106 if (add)
110 fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( 107 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
111 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE 108 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
112 << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
113 else 109 else
114 fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( 110 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
115 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE 111 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
116 << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
117 112
118 fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl 113 dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
119 << I40E_TXD_FLTR_QW1_DEST_SHIFT) 114 I40E_TXD_FLTR_QW1_DEST_MASK;
120 & I40E_TXD_FLTR_QW1_DEST_MASK);
121 115
122 fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( 116 dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
123 (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) 117 I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
124 & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
125 118
126 if (fdir_data->cnt_index != 0) { 119 if (fdir_data->cnt_index != 0) {
127 fdir_desc->dtype_cmd_cntindex |= 120 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
128 cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK); 121 dcc |= ((u32)fdir_data->cnt_index <<
129 fdir_desc->dtype_cmd_cntindex |= 122 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
130 cpu_to_le32((fdir_data->cnt_index 123 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
131 << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
132 & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
133 } 124 }
134 125
126 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
135 fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id); 127 fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
136 128
137 /* Now program a dummy descriptor */ 129 /* Now program a dummy descriptor */
138 i = tx_ring->next_to_use; 130 i = tx_ring->next_to_use;
139 tx_desc = I40E_TX_DESC(tx_ring, i); 131 tx_desc = I40E_TX_DESC(tx_ring, i);
140 132
141 i++; 133 tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
142 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
143 134
144 tx_desc->buffer_addr = cpu_to_le64(dma); 135 tx_desc->buffer_addr = cpu_to_le64(dma);
145 td_cmd = I40E_TX_DESC_CMD_EOP | 136 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
146 I40E_TX_DESC_CMD_RS |
147 I40E_TX_DESC_CMD_DUMMY;
148 137
149 tx_desc->cmd_type_offset_bsz = 138 tx_desc->cmd_type_offset_bsz =
150 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0); 139 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
@@ -559,8 +548,6 @@ static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
559 i40e_set_new_dynamic_itr(&q_vector->tx); 548 i40e_set_new_dynamic_itr(&q_vector->tx);
560 if (old_itr != q_vector->tx.itr) 549 if (old_itr != q_vector->tx.itr)
561 wr32(hw, reg_addr, q_vector->tx.itr); 550 wr32(hw, reg_addr, q_vector->tx.itr);
562
563 i40e_flush(hw);
564} 551}
565 552
566/** 553/**
@@ -1155,7 +1142,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
1155 qval = rd32(hw, I40E_QINT_TQCTL(0)); 1142 qval = rd32(hw, I40E_QINT_TQCTL(0));
1156 qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK; 1143 qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1157 wr32(hw, I40E_QINT_TQCTL(0), qval); 1144 wr32(hw, I40E_QINT_TQCTL(0), qval);
1158 i40e_flush(hw); 1145
1146 i40e_irq_dynamic_enable_icr0(vsi->back);
1159 } 1147 }
1160 } 1148 }
1161 1149
@@ -1256,7 +1244,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1256 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); 1244 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
1257} 1245}
1258 1246
1259#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
1260/** 1247/**
1261 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW 1248 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1262 * @skb: send buffer 1249 * @skb: send buffer
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8967e58e2408..07596982a477 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -251,7 +251,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
251 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 251 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
252 else 252 else
253 reg_idx = I40E_VPINT_LNKLSTN( 253 reg_idx = I40E_VPINT_LNKLSTN(
254 ((pf->hw.func_caps.num_msix_vectors_vf - 1) 254 (pf->hw.func_caps.num_msix_vectors_vf
255 * vf->vf_id) + (vector_id - 1)); 255 * vf->vf_id) + (vector_id - 1));
256 256
257 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { 257 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
@@ -383,7 +383,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
383 383
384 /* associate this queue with the PCI VF function */ 384 /* associate this queue with the PCI VF function */
385 qtx_ctl = I40E_QTX_CTL_VF_QUEUE; 385 qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
386 qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT) 386 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
387 & I40E_QTX_CTL_PF_INDX_MASK); 387 & I40E_QTX_CTL_PF_INDX_MASK);
388 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id) 388 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
389 << I40E_QTX_CTL_VFVM_INDX_SHIFT) 389 << I40E_QTX_CTL_VFVM_INDX_SHIFT)