author     David S. Miller <davem@davemloft.net>  2013-12-18 14:58:41 -0500
committer  David S. Miller <davem@davemloft.net>  2013-12-18 14:58:41 -0500
commit     ac727c49d772c059d6c544680d08c1832b93b67a (patch)
tree       99f2714216e834cf0589b3feda6038ad0fe042bd
parent     3627287463b4acddb83d24fabb1e0a304e39565c (diff)
parent     cd14ef54d25bcf0b8e9205e75369e33b1d188417 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates

This series contains updates to i40e, ixgbevf, ixgbe and igb.

Don provides an ixgbevf patch to add DCB configuration into the queue setup so that we won't have to allocate queues in a separate place when enabling DCB.

Guenter Roeck provides two patches for ixgbe that simplify the code by attaching the hwmon sysfs attributes to the hwmon device instead of the PCI device. They also fix an issue where the temperature sensor attribute index started at 0 instead of 1, as the hwmon API requires.

Carolyn provides igb patches that fix the queue allocation method to accommodate changes during runtime. This includes changing how the driver initializes MSI-X and checks for the MSI-X configuration, to make it easier to reconfigure the device when queue counts change at runtime.

Neerav and Shannon fix the i40e debugfs commands that dump hex information by using print_hex_dump().

Shannon provides several i40e fixes, including preventing a NULL pointer dereference in the dump descriptor command by checking that the rings were allocated before referencing them, and fixing up a couple of sscanf calls to accept numbers in various bases instead of silently requiring hex.

Anjali fixes i40e to use the correct defines for the misc interrupts.

Alan Cox provides a fix for i40e where the code assumed the resulting buffer was zero terminated before re-using it; the sscanf width is limited to 511 bytes rather than 512 to leave room for the terminator.

Stephen Hemminger fixes i40e by making local functions static and removing unused code (the i40e_aq_add_vlan()/i40e_aq_remove_vlan() functions).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
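One detail from Alan Cox's fix is worth spelling out: a scanf-style "%Ns" conversion stores up to N characters and then a terminating NUL, so a 512-byte buffer needs a "%511s" width. A minimal sketch in plain C (not the driver code itself):

#include <stdio.h>

int main(void)
{
	char asc_packet[512];

	/* "%511s" reads at most 511 characters; sscanf() then appends the
	 * terminating '\0', which is why a 512-byte buffer cannot take "%512s".
	 */
	if (sscanf("deadbeefcafef00d", "%511s", asc_packet) == 1)
		printf("parsed: %s\n", asc_packet);

	return 0;
}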
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c      |   6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c      |  80
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c     | 132
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c        |   6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h   |   8
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h               |   9
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c       |   6
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c          |  94
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h           |   8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c     |  84
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h       |   1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  | 228
12 files changed, 243 insertions(+), 419 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 30f32f3a86bc..c7e286621a94 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -31,6 +31,8 @@
31#include "i40e_adminq.h" 31#include "i40e_adminq.h"
32#include "i40e_prototype.h" 32#include "i40e_prototype.h"
33 33
34static void i40e_resume_aq(struct i40e_hw *hw);
35
34/** 36/**
35 * i40e_adminq_init_regs - Initialize AdminQ registers 37 * i40e_adminq_init_regs - Initialize AdminQ registers
36 * @hw: pointer to the hardware structure 38 * @hw: pointer to the hardware structure
@@ -675,7 +677,7 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
675 * Returns true if the firmware has processed all descriptors on the 677 * Returns true if the firmware has processed all descriptors on the
676 * admin send queue. Returns false if there are still requests pending. 678 * admin send queue. Returns false if there are still requests pending.
677 **/ 679 **/
678bool i40e_asq_done(struct i40e_hw *hw) 680static bool i40e_asq_done(struct i40e_hw *hw)
679{ 681{
680 /* AQ designers suggest use of head for better 682 /* AQ designers suggest use of head for better
681 * timing reliability than DD bit 683 * timing reliability than DD bit
@@ -963,7 +965,7 @@ clean_arq_element_out:
963 return ret_code; 965 return ret_code;
964} 966}
965 967
966void i40e_resume_aq(struct i40e_hw *hw) 968static void i40e_resume_aq(struct i40e_hw *hw)
967{ 969{
968 u32 reg = 0; 970 u32 reg = 0;
969 971
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index e526d40e9cf8..8b6d56a82fce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1129,86 +1129,6 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
1129} 1129}
1130 1130
1131/** 1131/**
1132 * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
1133 * @hw: pointer to the hw struct
1134 * @seid: VSI for the vlan filters
1135 * @v_list: list of vlan filters to be added
1136 * @count: length of the list
1137 * @cmd_details: pointer to command details structure or NULL
1138 **/
1139i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
1140 struct i40e_aqc_add_remove_vlan_element_data *v_list,
1141 u8 count, struct i40e_asq_cmd_details *cmd_details)
1142{
1143 struct i40e_aq_desc desc;
1144 struct i40e_aqc_macvlan *cmd =
1145 (struct i40e_aqc_macvlan *)&desc.params.raw;
1146 i40e_status status;
1147 u16 buf_size;
1148
1149 if (count == 0 || !v_list || !hw)
1150 return I40E_ERR_PARAM;
1151
1152 buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
1153
1154 /* prep the rest of the request */
1155 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
1156 cmd->num_addresses = cpu_to_le16(count);
1157 cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
1158 cmd->seid[1] = 0;
1159 cmd->seid[2] = 0;
1160
1161 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1162 if (buf_size > I40E_AQ_LARGE_BUF)
1163 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1164
1165 status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
1166 cmd_details);
1167
1168 return status;
1169}
1170
1171/**
1172 * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
1173 * @hw: pointer to the hw struct
1174 * @seid: VSI for the vlan filters
1175 * @v_list: list of macvlans to be removed
1176 * @count: length of the list
1177 * @cmd_details: pointer to command details structure or NULL
1178 **/
1179i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
1180 struct i40e_aqc_add_remove_vlan_element_data *v_list,
1181 u8 count, struct i40e_asq_cmd_details *cmd_details)
1182{
1183 struct i40e_aq_desc desc;
1184 struct i40e_aqc_macvlan *cmd =
1185 (struct i40e_aqc_macvlan *)&desc.params.raw;
1186 i40e_status status;
1187 u16 buf_size;
1188
1189 if (count == 0 || !v_list || !hw)
1190 return I40E_ERR_PARAM;
1191
1192 buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
1193
1194 /* prep the rest of the request */
1195 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
1196 cmd->num_addresses = cpu_to_le16(count);
1197 cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
1198 cmd->seid[1] = 0;
1199 cmd->seid[2] = 0;
1200
1201 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
1202 if (buf_size > I40E_AQ_LARGE_BUF)
1203 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1204
1205 status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
1206 cmd_details);
1207
1208 return status;
1209}
1210
1211/**
1212 * i40e_aq_send_msg_to_vf 1132 * i40e_aq_send_msg_to_vf
1213 * @hw: pointer to the hardware structure 1133 * @hw: pointer to the hardware structure
1214 * @vfid: vf id to send msg 1134 * @vfid: vf id to send msg
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 9a59dda6b5ce..e201060fe368 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -362,7 +362,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
362} 362}
363 363
364/** 364/**
365 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into pokem datum 365 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
366 * @pf: the i40e_pf created in command write 366 * @pf: the i40e_pf created in command write
367 * @seid: the seid the user put in 367 * @seid: the seid the user put in
368 **/ 368 **/
@@ -707,8 +707,13 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
707{ 707{
708 struct i40e_adminq_ring *ring; 708 struct i40e_adminq_ring *ring;
709 struct i40e_hw *hw = &pf->hw; 709 struct i40e_hw *hw = &pf->hw;
710 char hdr[32];
710 int i; 711 int i;
711 712
713 snprintf(hdr, sizeof(hdr), "%s %s: ",
714 dev_driver_string(&pf->pdev->dev),
715 dev_name(&pf->pdev->dev));
716
712 /* first the send (command) ring, then the receive (event) ring */ 717 /* first the send (command) ring, then the receive (event) ring */
713 dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n"); 718 dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
714 ring = &(hw->aq.asq); 719 ring = &(hw->aq.asq);
@@ -718,14 +723,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
718 " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", 723 " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
719 i, d->flags, d->opcode, d->datalen, d->retval, 724 i, d->flags, d->opcode, d->datalen, d->retval,
720 d->cookie_high, d->cookie_low); 725 d->cookie_high, d->cookie_low);
721 dev_info(&pf->pdev->dev, 726 print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
722 " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", 727 16, 1, d->params.raw, 16, 0);
723 d->params.raw[0], d->params.raw[1], d->params.raw[2],
724 d->params.raw[3], d->params.raw[4], d->params.raw[5],
725 d->params.raw[6], d->params.raw[7], d->params.raw[8],
726 d->params.raw[9], d->params.raw[10], d->params.raw[11],
727 d->params.raw[12], d->params.raw[13],
728 d->params.raw[14], d->params.raw[15]);
729 } 728 }
730 729
731 dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n"); 730 dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
@@ -736,14 +735,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
736 " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", 735 " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
737 i, d->flags, d->opcode, d->datalen, d->retval, 736 i, d->flags, d->opcode, d->datalen, d->retval,
738 d->cookie_high, d->cookie_low); 737 d->cookie_high, d->cookie_low);
739 dev_info(&pf->pdev->dev, 738 print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
740 " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", 739 16, 1, d->params.raw, 16, 0);
741 d->params.raw[0], d->params.raw[1], d->params.raw[2],
742 d->params.raw[3], d->params.raw[4], d->params.raw[5],
743 d->params.raw[6], d->params.raw[7], d->params.raw[8],
744 d->params.raw[9], d->params.raw[10], d->params.raw[11],
745 d->params.raw[12], d->params.raw[13],
746 d->params.raw[14], d->params.raw[15]);
747 } 740 }
748} 741}
749 742
@@ -766,20 +759,17 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
766 759
767 vsi = i40e_dbg_find_vsi(pf, vsi_seid); 760 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
768 if (!vsi) { 761 if (!vsi) {
769 dev_info(&pf->pdev->dev, 762 dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
770 "vsi %d not found\n", vsi_seid);
771 if (is_rx_ring)
772 dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
773 else
774 dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
775 return; 763 return;
776 } 764 }
777 if (ring_id >= vsi->num_queue_pairs || ring_id < 0) { 765 if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
778 dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id); 766 dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
779 if (is_rx_ring) 767 return;
780 dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); 768 }
781 else 769 if (!vsi->tx_rings) {
782 dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n"); 770 dev_info(&pf->pdev->dev,
771 "descriptor rings have not been allocated for vsi %d\n",
772 vsi_seid);
783 return; 773 return;
784 } 774 }
785 if (is_rx_ring) 775 if (is_rx_ring)
@@ -830,10 +820,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
830 desc_n, ds->read.pkt_addr, ds->read.hdr_addr, 820 desc_n, ds->read.pkt_addr, ds->read.hdr_addr,
831 ds->read.rsvd1, ds->read.rsvd2); 821 ds->read.rsvd1, ds->read.rsvd2);
832 } else { 822 } else {
833 if (is_rx_ring) 823 dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
834 dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
835 else
836 dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
837 } 824 }
838} 825}
839 826
@@ -979,8 +966,7 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
979 966
980 veb = i40e_dbg_find_veb(pf, seid); 967 veb = i40e_dbg_find_veb(pf, seid);
981 if (!veb) { 968 if (!veb) {
982 dev_info(&pf->pdev->dev, 969 dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
983 "%d: can't find veb\n", seid);
984 return; 970 return;
985 } 971 }
986 dev_info(&pf->pdev->dev, 972 dev_info(&pf->pdev->dev,
@@ -1022,8 +1008,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1022 char *cmd_buf, *cmd_buf_tmp; 1008 char *cmd_buf, *cmd_buf_tmp;
1023 int bytes_not_copied; 1009 int bytes_not_copied;
1024 struct i40e_vsi *vsi; 1010 struct i40e_vsi *vsi;
1025 u8 *print_buf_start;
1026 u8 *print_buf;
1027 int vsi_seid; 1011 int vsi_seid;
1028 int veb_seid; 1012 int veb_seid;
1029 int cnt; 1013 int cnt;
@@ -1048,11 +1032,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1048 count = cmd_buf_tmp - cmd_buf + 1; 1032 count = cmd_buf_tmp - cmd_buf + 1;
1049 } 1033 }
1050 1034
1051 print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
1052 if (!print_buf_start)
1053 goto command_write_done;
1054 print_buf = print_buf_start;
1055
1056 if (strncmp(cmd_buf, "add vsi", 7) == 0) { 1035 if (strncmp(cmd_buf, "add vsi", 7) == 0) {
1057 vsi_seid = -1; 1036 vsi_seid = -1;
1058 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid); 1037 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
@@ -1479,7 +1458,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1479 } else if (strncmp(cmd_buf, "read", 4) == 0) { 1458 } else if (strncmp(cmd_buf, "read", 4) == 0) {
1480 u32 address; 1459 u32 address;
1481 u32 value; 1460 u32 value;
1482 cnt = sscanf(&cmd_buf[4], "%x", &address); 1461 cnt = sscanf(&cmd_buf[4], "%i", &address);
1483 if (cnt != 1) { 1462 if (cnt != 1) {
1484 dev_info(&pf->pdev->dev, "read <reg>\n"); 1463 dev_info(&pf->pdev->dev, "read <reg>\n");
1485 goto command_write_done; 1464 goto command_write_done;
@@ -1498,7 +1477,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1498 1477
1499 } else if (strncmp(cmd_buf, "write", 5) == 0) { 1478 } else if (strncmp(cmd_buf, "write", 5) == 0) {
1500 u32 address, value; 1479 u32 address, value;
1501 cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value); 1480 cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
1502 if (cnt != 2) { 1481 if (cnt != 2) {
1503 dev_info(&pf->pdev->dev, "write <reg> <value>\n"); 1482 dev_info(&pf->pdev->dev, "write <reg> <value>\n");
1504 goto command_write_done; 1483 goto command_write_done;
@@ -1516,7 +1495,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1516 address, value); 1495 address, value);
1517 } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) { 1496 } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
1518 if (strncmp(&cmd_buf[12], "vsi", 3) == 0) { 1497 if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
1519 cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid); 1498 cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
1520 if (cnt == 0) { 1499 if (cnt == 0) {
1521 int i; 1500 int i;
1522 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) 1501 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
@@ -1568,7 +1547,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1568 if (strncmp(cmd_buf, "add", 3) == 0) 1547 if (strncmp(cmd_buf, "add", 3) == 0)
1569 add = true; 1548 add = true;
1570 cnt = sscanf(&cmd_buf[13], 1549 cnt = sscanf(&cmd_buf[13],
1571 "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s", 1550 "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
1572 &fd_data.q_index, 1551 &fd_data.q_index,
1573 &fd_data.flex_off, &fd_data.pctype, 1552 &fd_data.flex_off, &fd_data.pctype,
1574 &fd_data.dest_vsi, &fd_data.dest_ctl, 1553 &fd_data.dest_vsi, &fd_data.dest_ctl,
@@ -1592,19 +1571,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1592 packet_len = min_t(u16, 1571 packet_len = min_t(u16,
1593 packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP); 1572 packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
1594 1573
1595 dev_info(&pf->pdev->dev, "FD raw packet:\n");
1596 for (i = 0; i < packet_len; i++) { 1574 for (i = 0; i < packet_len; i++) {
1597 sscanf(&asc_packet[j], "%2hhx ", 1575 sscanf(&asc_packet[j], "%2hhx ",
1598 &fd_data.raw_packet[i]); 1576 &fd_data.raw_packet[i]);
1599 j += 3; 1577 j += 3;
1600 snprintf(print_buf, 3, "%02x ", fd_data.raw_packet[i]);
1601 print_buf += 3;
1602 if ((i % 16) == 15) {
1603 snprintf(print_buf, 1, "\n");
1604 print_buf++;
1605 }
1606 } 1578 }
1607 dev_info(&pf->pdev->dev, "%s\n", print_buf_start); 1579 dev_info(&pf->pdev->dev, "FD raw packet dump\n");
1580 print_hex_dump(KERN_INFO, "FD raw packet: ",
1581 DUMP_PREFIX_OFFSET, 16, 1,
1582 fd_data.raw_packet, packet_len, true);
1608 ret = i40e_program_fdir_filter(&fd_data, pf, add); 1583 ret = i40e_program_fdir_filter(&fd_data, pf, add);
1609 if (!ret) { 1584 if (!ret) {
1610 dev_info(&pf->pdev->dev, "Filter command send Status : Success\n"); 1585 dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
@@ -1638,7 +1613,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1638 } else if (strncmp(&cmd_buf[5], 1613 } else if (strncmp(&cmd_buf[5],
1639 "get local", 9) == 0) { 1614 "get local", 9) == 0) {
1640 u16 llen, rlen; 1615 u16 llen, rlen;
1641 int ret, i; 1616 int ret;
1642 u8 *buff; 1617 u8 *buff;
1643 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); 1618 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1644 if (!buff) 1619 if (!buff)
@@ -1656,22 +1631,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1656 buff = NULL; 1631 buff = NULL;
1657 goto command_write_done; 1632 goto command_write_done;
1658 } 1633 }
1659 dev_info(&pf->pdev->dev, 1634 dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
1660 "Get LLDP MIB (local) AQ buffer written back:\n"); 1635 print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
1661 for (i = 0; i < I40E_LLDPDU_SIZE; i++) { 1636 DUMP_PREFIX_OFFSET, 16, 1,
1662 snprintf(print_buf, 3, "%02x ", buff[i]); 1637 buff, I40E_LLDPDU_SIZE, true);
1663 print_buf += 3;
1664 if ((i % 16) == 15) {
1665 snprintf(print_buf, 1, "\n");
1666 print_buf++;
1667 }
1668 }
1669 dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
1670 kfree(buff); 1638 kfree(buff);
1671 buff = NULL; 1639 buff = NULL;
1672 } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) { 1640 } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
1673 u16 llen, rlen; 1641 u16 llen, rlen;
1674 int ret, i; 1642 int ret;
1675 u8 *buff; 1643 u8 *buff;
1676 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); 1644 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1677 if (!buff) 1645 if (!buff)
@@ -1690,17 +1658,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1690 buff = NULL; 1658 buff = NULL;
1691 goto command_write_done; 1659 goto command_write_done;
1692 } 1660 }
1693 dev_info(&pf->pdev->dev, 1661 dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
1694 "Get LLDP MIB (remote) AQ buffer written back:\n"); 1662 print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
1695 for (i = 0; i < I40E_LLDPDU_SIZE; i++) { 1663 DUMP_PREFIX_OFFSET, 16, 1,
1696 snprintf(print_buf, 3, "%02x ", buff[i]); 1664 buff, I40E_LLDPDU_SIZE, true);
1697 print_buf += 3;
1698 if ((i % 16) == 15) {
1699 snprintf(print_buf, 1, "\n");
1700 print_buf++;
1701 }
1702 }
1703 dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
1704 kfree(buff); 1665 kfree(buff);
1705 buff = NULL; 1666 buff = NULL;
1706 } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) { 1667 } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
@@ -1725,7 +1686,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1725 } 1686 }
1726 } 1687 }
1727 } else if (strncmp(cmd_buf, "nvm read", 8) == 0) { 1688 } else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
1728 u16 buffer_len, i, bytes; 1689 u16 buffer_len, bytes;
1729 u16 module; 1690 u16 module;
1730 u32 offset; 1691 u32 offset;
1731 u16 *buff; 1692 u16 *buff;
@@ -1779,16 +1740,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1779 dev_info(&pf->pdev->dev, 1740 dev_info(&pf->pdev->dev,
1780 "Read NVM module=0x%x offset=0x%x words=%d\n", 1741 "Read NVM module=0x%x offset=0x%x words=%d\n",
1781 module, offset, buffer_len); 1742 module, offset, buffer_len);
1782 for (i = 0; i < buffer_len; i++) { 1743 if (buffer_len)
1783 if ((i % 16) == 0) { 1744 print_hex_dump(KERN_INFO, "NVM Dump: ",
1784 snprintf(print_buf, 11, "\n0x%08x: ", 1745 DUMP_PREFIX_OFFSET, 16, 2,
1785 offset + i); 1746 buff, buffer_len, true);
1786 print_buf += 11;
1787 }
1788 snprintf(print_buf, 5, "%04x ", buff[i]);
1789 print_buf += 5;
1790 }
1791 dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
1792 } 1747 }
1793 kfree(buff); 1748 kfree(buff);
1794 buff = NULL; 1749 buff = NULL;
@@ -1832,9 +1787,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1832command_write_done: 1787command_write_done:
1833 kfree(cmd_buf); 1788 kfree(cmd_buf);
1834 cmd_buf = NULL; 1789 cmd_buf = NULL;
1835 kfree(print_buf_start);
1836 print_buf = NULL;
1837 print_buf_start = NULL;
1838 return count; 1790 return count;
1839} 1791}
1840 1792
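The debugfs changes above follow two reusable patterns: print_hex_dump() replaces the hand-rolled "%02x %02x ..." loops, and sscanf("%i") accepts hex, octal or decimal input instead of silently assuming hex. A minimal kernel-style sketch with illustrative names (example_*), not the exact driver context:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <linux/errno.h>

static void example_dump(const u8 *buf, size_t len)
{
	/* 16 bytes per row, 1-byte groups, offset prefix, ASCII column on */
	print_hex_dump(KERN_INFO, "FD raw packet: ", DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, true);
}

static int example_parse_reg(const char *cmd_buf, u32 *address)
{
	/* "%i" auto-detects the base: 0x.. is hex, 0.. is octal, else decimal */
	if (sscanf(cmd_buf, "%i", address) != 1)
		return -EINVAL;

	return 0;
}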
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index fba0aada062a..da5e8e441e26 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,7 +36,7 @@ static const char i40e_driver_string[] =
36 36
37#define DRV_VERSION_MAJOR 0 37#define DRV_VERSION_MAJOR 0
38#define DRV_VERSION_MINOR 3 38#define DRV_VERSION_MINOR 3
39#define DRV_VERSION_BUILD 13 39#define DRV_VERSION_BUILD 14
40#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 40#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
41 __stringify(DRV_VERSION_MINOR) "." \ 41 __stringify(DRV_VERSION_MINOR) "." \
42 __stringify(DRV_VERSION_BUILD) DRV_KERN 42 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -2489,8 +2489,8 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2489 wr32(hw, I40E_PFINT_ICR0_ENA, val); 2489 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2490 2490
2491 /* SW_ITR_IDX = 0, but don't change INTENA */ 2491 /* SW_ITR_IDX = 0, but don't change INTENA */
2492 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK | 2492 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2493 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK); 2493 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2494 2494
2495 /* OTHER_ITR_IDX = 0 */ 2495 /* OTHER_ITR_IDX = 0 */
2496 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 2496 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 930f53a2f50c..2fc9ce528d4d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -51,7 +51,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
51 void *buff, /* can be NULL */ 51 void *buff, /* can be NULL */
52 u16 buff_size, 52 u16 buff_size,
53 struct i40e_asq_cmd_details *cmd_details); 53 struct i40e_asq_cmd_details *cmd_details);
54bool i40e_asq_done(struct i40e_hw *hw);
55 54
56/* debug function for adminq */ 55/* debug function for adminq */
57void i40e_debug_aq(struct i40e_hw *hw, 56void i40e_debug_aq(struct i40e_hw *hw,
@@ -60,7 +59,6 @@ void i40e_debug_aq(struct i40e_hw *hw,
60 void *buffer); 59 void *buffer);
61 60
62void i40e_idle_aq(struct i40e_hw *hw); 61void i40e_idle_aq(struct i40e_hw *hw);
63void i40e_resume_aq(struct i40e_hw *hw);
64 62
65u32 i40e_led_get(struct i40e_hw *hw); 63u32 i40e_led_get(struct i40e_hw *hw);
66void i40e_led_set(struct i40e_hw *hw, u32 mode); 64void i40e_led_set(struct i40e_hw *hw, u32 mode);
@@ -120,12 +118,6 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
120i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, 118i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
121 struct i40e_aqc_remove_macvlan_element_data *mv_list, 119 struct i40e_aqc_remove_macvlan_element_data *mv_list,
122 u16 count, struct i40e_asq_cmd_details *cmd_details); 120 u16 count, struct i40e_asq_cmd_details *cmd_details);
123i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
124 struct i40e_aqc_add_remove_vlan_element_data *v_list,
125 u8 count, struct i40e_asq_cmd_details *cmd_details);
126i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
127 struct i40e_aqc_add_remove_vlan_element_data *v_list,
128 u8 count, struct i40e_asq_cmd_details *cmd_details);
129i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 121i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
130 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 122 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
131 struct i40e_asq_cmd_details *cmd_details); 123 struct i40e_asq_cmd_details *cmd_details);
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8aaca0a1dca0..ccf472f073dd 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -41,6 +41,7 @@
41#include <linux/if_vlan.h> 41#include <linux/if_vlan.h>
42#include <linux/i2c.h> 42#include <linux/i2c.h>
43#include <linux/i2c-algo-bit.h> 43#include <linux/i2c-algo-bit.h>
44#include <linux/pci.h>
44 45
45struct igb_adapter; 46struct igb_adapter;
46 47
@@ -67,6 +68,7 @@ struct igb_adapter;
67#define IGB_MIN_ITR_USECS 10 68#define IGB_MIN_ITR_USECS 10
68#define NON_Q_VECTORS 1 69#define NON_Q_VECTORS 1
69#define MAX_Q_VECTORS 8 70#define MAX_Q_VECTORS 8
71#define MAX_MSIX_ENTRIES 10
70 72
71/* Transmit and receive queues */ 73/* Transmit and receive queues */
72#define IGB_MAX_RX_QUEUES 8 74#define IGB_MAX_RX_QUEUES 8
@@ -127,9 +129,9 @@ struct vf_data_storage {
127#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8) 129#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
128#define IGB_TX_HTHRESH 1 130#define IGB_TX_HTHRESH 1
129#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ 131#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
130 adapter->msix_entries) ? 1 : 4) 132 (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
131#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ 133#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
132 adapter->msix_entries) ? 1 : 16) 134 (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)
133 135
134/* this is the size past which hardware will drop packets when setting LPE=0 */ 136/* this is the size past which hardware will drop packets when setting LPE=0 */
135#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 137#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -357,7 +359,7 @@ struct igb_adapter {
357 unsigned int flags; 359 unsigned int flags;
358 360
359 unsigned int num_q_vectors; 361 unsigned int num_q_vectors;
360 struct msix_entry *msix_entries; 362 struct msix_entry msix_entries[MAX_MSIX_ENTRIES];
361 363
362 /* Interrupt Throttle Rate */ 364 /* Interrupt Throttle Rate */
363 u32 rx_itr_setting; 365 u32 rx_itr_setting;
@@ -469,6 +471,7 @@ struct igb_adapter {
469#define IGB_FLAG_MEDIA_RESET (1 << 10) 471#define IGB_FLAG_MEDIA_RESET (1 << 10)
470#define IGB_FLAG_MAS_CAPABLE (1 << 11) 472#define IGB_FLAG_MAS_CAPABLE (1 << 11)
471#define IGB_FLAG_MAS_ENABLE (1 << 12) 473#define IGB_FLAG_MAS_ENABLE (1 << 12)
474#define IGB_FLAG_HAS_MSIX (1 << 13)
472 475
473/* Media Auto Sense */ 476/* Media Auto Sense */
474#define IGB_MAS_ENABLE_0 0X0001 477#define IGB_MAS_ENABLE_0 0X0001
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 1c7d2381af8c..1df02378de69 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1386,7 +1386,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1386 *data = 0; 1386 *data = 0;
1387 1387
1388 /* Hook up test interrupt handler just for this test */ 1388 /* Hook up test interrupt handler just for this test */
1389 if (adapter->msix_entries) { 1389 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1390 if (request_irq(adapter->msix_entries[0].vector, 1390 if (request_irq(adapter->msix_entries[0].vector,
1391 igb_test_intr, 0, netdev->name, adapter)) { 1391 igb_test_intr, 0, netdev->name, adapter)) {
1392 *data = 1; 1392 *data = 1;
@@ -1519,7 +1519,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1519 msleep(10); 1519 msleep(10);
1520 1520
1521 /* Unhook test interrupt handler */ 1521 /* Unhook test interrupt handler */
1522 if (adapter->msix_entries) 1522 if (adapter->flags & IGB_FLAG_HAS_MSIX)
1523 free_irq(adapter->msix_entries[0].vector, adapter); 1523 free_irq(adapter->msix_entries[0].vector, adapter);
1524 else 1524 else
1525 free_irq(irq, adapter); 1525 free_irq(irq, adapter);
@@ -2933,7 +2933,7 @@ static void igb_get_channels(struct net_device *netdev,
2933 ch->max_combined = igb_max_channels(adapter); 2933 ch->max_combined = igb_max_channels(adapter);
2934 2934
2935 /* Report info for other vector */ 2935 /* Report info for other vector */
2936 if (adapter->msix_entries) { 2936 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
2937 ch->max_other = NON_Q_VECTORS; 2937 ch->max_other = NON_Q_VECTORS;
2938 ch->other_count = NON_Q_VECTORS; 2938 ch->other_count = NON_Q_VECTORS;
2939 } 2939 }
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 3bc10bd5bbc1..46d31a49f5ea 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -803,7 +803,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
803 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 803 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
804 if (tx_queue > IGB_N0_QUEUE) 804 if (tx_queue > IGB_N0_QUEUE)
805 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; 805 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
806 if (!adapter->msix_entries && msix_vector == 0) 806 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
807 msixbm |= E1000_EIMS_OTHER; 807 msixbm |= E1000_EIMS_OTHER;
808 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 808 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
809 q_vector->eims_value = msixbm; 809 q_vector->eims_value = msixbm;
@@ -983,43 +983,58 @@ err_out:
983 return err; 983 return err;
984} 984}
985 985
986static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
987{
988 if (adapter->msix_entries) {
989 pci_disable_msix(adapter->pdev);
990 kfree(adapter->msix_entries);
991 adapter->msix_entries = NULL;
992 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
993 pci_disable_msi(adapter->pdev);
994 }
995}
996
997/** 986/**
998 * igb_free_q_vector - Free memory allocated for specific interrupt vector 987 * igb_free_q_vector - Free memory allocated for specific interrupt vector
999 * @adapter: board private structure to initialize 988 * @adapter: board private structure to initialize
1000 * @v_idx: Index of vector to be freed 989 * @v_idx: Index of vector to be freed
1001 * 990 *
1002 * This function frees the memory allocated to the q_vector. In addition if 991 * This function frees the memory allocated to the q_vector.
1003 * NAPI is enabled it will delete any references to the NAPI struct prior
1004 * to freeing the q_vector.
1005 **/ 992 **/
1006static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) 993static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1007{ 994{
1008 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; 995 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1009 996
997 adapter->q_vector[v_idx] = NULL;
998
999 /* igb_get_stats64() might access the rings on this vector,
1000 * we must wait a grace period before freeing it.
1001 */
1002 kfree_rcu(q_vector, rcu);
1003}
1004
1005/**
1006 * igb_reset_q_vector - Reset config for interrupt vector
1007 * @adapter: board private structure to initialize
1008 * @v_idx: Index of vector to be reset
1009 *
1010 * If NAPI is enabled it will delete any references to the
1011 * NAPI struct. This is preparation for igb_free_q_vector.
1012 **/
1013static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1014{
1015 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1016
1010 if (q_vector->tx.ring) 1017 if (q_vector->tx.ring)
1011 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 1018 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1012 1019
1013 if (q_vector->rx.ring) 1020 if (q_vector->rx.ring)
1014 adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL; 1021 adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL;
1015 1022
1016 adapter->q_vector[v_idx] = NULL;
1017 netif_napi_del(&q_vector->napi); 1023 netif_napi_del(&q_vector->napi);
1018 1024
1019 /* igb_get_stats64() might access the rings on this vector, 1025}
1020 * we must wait a grace period before freeing it. 1026
1021 */ 1027static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1022 kfree_rcu(q_vector, rcu); 1028{
1029 int v_idx = adapter->num_q_vectors;
1030
1031 if (adapter->flags & IGB_FLAG_HAS_MSIX)
1032 pci_disable_msix(adapter->pdev);
1033 else if (adapter->flags & IGB_FLAG_HAS_MSI)
1034 pci_disable_msi(adapter->pdev);
1035
1036 while (v_idx--)
1037 igb_reset_q_vector(adapter, v_idx);
1023} 1038}
1024 1039
1025/** 1040/**
@@ -1038,8 +1053,10 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
1038 adapter->num_rx_queues = 0; 1053 adapter->num_rx_queues = 0;
1039 adapter->num_q_vectors = 0; 1054 adapter->num_q_vectors = 0;
1040 1055
1041 while (v_idx--) 1056 while (v_idx--) {
1057 igb_reset_q_vector(adapter, v_idx);
1042 igb_free_q_vector(adapter, v_idx); 1058 igb_free_q_vector(adapter, v_idx);
1059 }
1043} 1060}
1044 1061
1045/** 1062/**
@@ -1070,6 +1087,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1070 1087
1071 if (!msix) 1088 if (!msix)
1072 goto msi_only; 1089 goto msi_only;
1090 adapter->flags |= IGB_FLAG_HAS_MSIX;
1073 1091
1074 /* Number of supported queues. */ 1092 /* Number of supported queues. */
1075 adapter->num_rx_queues = adapter->rss_queues; 1093 adapter->num_rx_queues = adapter->rss_queues;
@@ -1090,12 +1108,6 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1090 1108
1091 /* add 1 vector for link status interrupts */ 1109 /* add 1 vector for link status interrupts */
1092 numvecs++; 1110 numvecs++;
1093 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
1094 GFP_KERNEL);
1095
1096 if (!adapter->msix_entries)
1097 goto msi_only;
1098
1099 for (i = 0; i < numvecs; i++) 1111 for (i = 0; i < numvecs; i++)
1100 adapter->msix_entries[i].entry = i; 1112 adapter->msix_entries[i].entry = i;
1101 1113
@@ -1172,7 +1184,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
1172 (sizeof(struct igb_ring) * ring_count); 1184 (sizeof(struct igb_ring) * ring_count);
1173 1185
1174 /* allocate q_vector and rings */ 1186 /* allocate q_vector and rings */
1175 q_vector = kzalloc(size, GFP_KERNEL); 1187 q_vector = adapter->q_vector[v_idx];
1188 if (!q_vector)
1189 q_vector = kzalloc(size, GFP_KERNEL);
1176 if (!q_vector) 1190 if (!q_vector)
1177 return -ENOMEM; 1191 return -ENOMEM;
1178 1192
@@ -1370,7 +1384,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
1370 struct pci_dev *pdev = adapter->pdev; 1384 struct pci_dev *pdev = adapter->pdev;
1371 int err = 0; 1385 int err = 0;
1372 1386
1373 if (adapter->msix_entries) { 1387 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1374 err = igb_request_msix(adapter); 1388 err = igb_request_msix(adapter);
1375 if (!err) 1389 if (!err)
1376 goto request_done; 1390 goto request_done;
@@ -1414,7 +1428,7 @@ request_done:
1414 1428
1415static void igb_free_irq(struct igb_adapter *adapter) 1429static void igb_free_irq(struct igb_adapter *adapter)
1416{ 1430{
1417 if (adapter->msix_entries) { 1431 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1418 int vector = 0, i; 1432 int vector = 0, i;
1419 1433
1420 free_irq(adapter->msix_entries[vector++].vector, adapter); 1434 free_irq(adapter->msix_entries[vector++].vector, adapter);
@@ -1439,7 +1453,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
1439 * mapped into these registers and so clearing the bits can cause 1453 * mapped into these registers and so clearing the bits can cause
1440 * issues on the VF drivers so we only need to clear what we set 1454 * issues on the VF drivers so we only need to clear what we set
1441 */ 1455 */
1442 if (adapter->msix_entries) { 1456 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1443 u32 regval = rd32(E1000_EIAM); 1457 u32 regval = rd32(E1000_EIAM);
1444 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); 1458 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1445 wr32(E1000_EIMC, adapter->eims_enable_mask); 1459 wr32(E1000_EIMC, adapter->eims_enable_mask);
@@ -1450,7 +1464,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
1450 wr32(E1000_IAM, 0); 1464 wr32(E1000_IAM, 0);
1451 wr32(E1000_IMC, ~0); 1465 wr32(E1000_IMC, ~0);
1452 wrfl(); 1466 wrfl();
1453 if (adapter->msix_entries) { 1467 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1454 int i; 1468 int i;
1455 for (i = 0; i < adapter->num_q_vectors; i++) 1469 for (i = 0; i < adapter->num_q_vectors; i++)
1456 synchronize_irq(adapter->msix_entries[i].vector); 1470 synchronize_irq(adapter->msix_entries[i].vector);
@@ -1467,7 +1481,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
1467{ 1481{
1468 struct e1000_hw *hw = &adapter->hw; 1482 struct e1000_hw *hw = &adapter->hw;
1469 1483
1470 if (adapter->msix_entries) { 1484 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1471 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; 1485 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1472 u32 regval = rd32(E1000_EIAC); 1486 u32 regval = rd32(E1000_EIAC);
1473 wr32(E1000_EIAC, regval | adapter->eims_enable_mask); 1487 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
@@ -1690,7 +1704,7 @@ int igb_up(struct igb_adapter *adapter)
1690 for (i = 0; i < adapter->num_q_vectors; i++) 1704 for (i = 0; i < adapter->num_q_vectors; i++)
1691 napi_enable(&(adapter->q_vector[i]->napi)); 1705 napi_enable(&(adapter->q_vector[i]->napi));
1692 1706
1693 if (adapter->msix_entries) 1707 if (adapter->flags & IGB_FLAG_HAS_MSIX)
1694 igb_configure_msix(adapter); 1708 igb_configure_msix(adapter);
1695 else 1709 else
1696 igb_assign_vector(adapter->q_vector[0], 0); 1710 igb_assign_vector(adapter->q_vector[0], 0);
@@ -2543,7 +2557,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2543 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); 2557 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
2544 dev_info(&pdev->dev, 2558 dev_info(&pdev->dev,
2545 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", 2559 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2546 adapter->msix_entries ? "MSI-X" : 2560 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
2547 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", 2561 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
2548 adapter->num_rx_queues, adapter->num_tx_queues); 2562 adapter->num_rx_queues, adapter->num_tx_queues);
2549 switch (hw->mac.type) { 2563 switch (hw->mac.type) {
@@ -2631,7 +2645,7 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
2631 int err = 0; 2645 int err = 0;
2632 int i; 2646 int i;
2633 2647
2634 if (!adapter->msix_entries || num_vfs > 7) { 2648 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
2635 err = -EPERM; 2649 err = -EPERM;
2636 goto out; 2650 goto out;
2637 } 2651 }
@@ -4251,7 +4265,7 @@ static void igb_watchdog_task(struct work_struct *work)
4251 } 4265 }
4252 4266
4253 /* Cause software interrupt to ensure Rx ring is cleaned */ 4267 /* Cause software interrupt to ensure Rx ring is cleaned */
4254 if (adapter->msix_entries) { 4268 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
4255 u32 eics = 0; 4269 u32 eics = 0;
4256 for (i = 0; i < adapter->num_q_vectors; i++) 4270 for (i = 0; i < adapter->num_q_vectors; i++)
4257 eics |= adapter->q_vector[i]->eims_value; 4271 eics |= adapter->q_vector[i]->eims_value;
@@ -6172,7 +6186,7 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
6172 } 6186 }
6173 6187
6174 if (!test_bit(__IGB_DOWN, &adapter->state)) { 6188 if (!test_bit(__IGB_DOWN, &adapter->state)) {
6175 if (adapter->msix_entries) 6189 if (adapter->flags & IGB_FLAG_HAS_MSIX)
6176 wr32(E1000_EIMS, q_vector->eims_value); 6190 wr32(E1000_EIMS, q_vector->eims_value);
6177 else 6191 else
6178 igb_irq_enable(adapter); 6192 igb_irq_enable(adapter);
@@ -7539,7 +7553,7 @@ static void igb_netpoll(struct net_device *netdev)
7539 7553
7540 for (i = 0; i < adapter->num_q_vectors; i++) { 7554 for (i = 0; i < adapter->num_q_vectors; i++) {
7541 q_vector = adapter->q_vector[i]; 7555 q_vector = adapter->q_vector[i];
7542 if (adapter->msix_entries) 7556 if (adapter->flags & IGB_FLAG_HAS_MSIX)
7543 wr32(E1000_EIMC, q_vector->eims_value); 7557 wr32(E1000_EIMC, q_vector->eims_value);
7544 else 7558 else
7545 igb_irq_disable(adapter); 7559 igb_irq_disable(adapter);
@@ -8037,7 +8051,7 @@ int igb_reinit_queues(struct igb_adapter *adapter)
8037 if (netif_running(netdev)) 8051 if (netif_running(netdev))
8038 igb_close(netdev); 8052 igb_close(netdev);
8039 8053
8040 igb_clear_interrupt_scheme(adapter); 8054 igb_reset_interrupt_capability(adapter);
8041 8055
8042 if (igb_init_interrupt_scheme(adapter, true)) { 8056 if (igb_init_interrupt_scheme(adapter, true)) {
8043 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 8057 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
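The igb rework above replaces the "is the msix_entries pointer non-NULL?" test with an explicit capability flag and a fixed-size entry table, so runtime queue changes no longer free and reallocate the vector table. A generic sketch of the pattern with illustrative names, not the real igb structures:

#include <linux/pci.h>

#define EX_MAX_MSIX_ENTRIES	10
#define EX_FLAG_HAS_MSIX	(1 << 0)
#define EX_FLAG_HAS_MSI		(1 << 1)

struct ex_adapter {
	struct pci_dev *pdev;
	unsigned int flags;
	struct msix_entry msix_entries[EX_MAX_MSIX_ENTRIES];
};

static void ex_reset_interrupt_capability(struct ex_adapter *adapter)
{
	/* The flag, not the (now always valid) array, records whether MSI-X
	 * was actually enabled and therefore needs to be torn down.
	 */
	if (adapter->flags & EX_FLAG_HAS_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->flags & EX_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);
}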
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index f38fc0a343a2..49531cd18987 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -552,8 +552,10 @@ struct hwmon_attr {
552}; 552};
553 553
554struct hwmon_buff { 554struct hwmon_buff {
555 struct device *device; 555 struct attribute_group group;
556 struct hwmon_attr *hwmon_list; 556 const struct attribute_group *groups[2];
557 struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
558 struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
557 unsigned int n_hwmon; 559 unsigned int n_hwmon;
558}; 560};
559#endif /* CONFIG_IXGBE_HWMON */ 561#endif /* CONFIG_IXGBE_HWMON */
@@ -775,7 +777,7 @@ struct ixgbe_adapter {
775 u32 vferr_refcount; 777 u32 vferr_refcount;
776 struct kobject *info_kobj; 778 struct kobject *info_kobj;
777#ifdef CONFIG_IXGBE_HWMON 779#ifdef CONFIG_IXGBE_HWMON
778 struct hwmon_buff ixgbe_hwmon_buff; 780 struct hwmon_buff *ixgbe_hwmon_buff;
779#endif /* CONFIG_IXGBE_HWMON */ 781#endif /* CONFIG_IXGBE_HWMON */
780#ifdef CONFIG_DEBUG_FS 782#ifdef CONFIG_DEBUG_FS
781 struct dentry *ixgbe_dbg_adapter; 783 struct dentry *ixgbe_dbg_adapter;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index d118def16f35..e74ae3682733 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -111,29 +111,29 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
111 unsigned int n_attr; 111 unsigned int n_attr;
112 struct hwmon_attr *ixgbe_attr; 112 struct hwmon_attr *ixgbe_attr;
113 113
114 n_attr = adapter->ixgbe_hwmon_buff.n_hwmon; 114 n_attr = adapter->ixgbe_hwmon_buff->n_hwmon;
115 ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr]; 115 ixgbe_attr = &adapter->ixgbe_hwmon_buff->hwmon_list[n_attr];
116 116
117 switch (type) { 117 switch (type) {
118 case IXGBE_HWMON_TYPE_LOC: 118 case IXGBE_HWMON_TYPE_LOC:
119 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location; 119 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location;
120 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), 120 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
121 "temp%u_label", offset); 121 "temp%u_label", offset + 1);
122 break; 122 break;
123 case IXGBE_HWMON_TYPE_TEMP: 123 case IXGBE_HWMON_TYPE_TEMP:
124 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp; 124 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp;
125 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), 125 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
126 "temp%u_input", offset); 126 "temp%u_input", offset + 1);
127 break; 127 break;
128 case IXGBE_HWMON_TYPE_CAUTION: 128 case IXGBE_HWMON_TYPE_CAUTION:
129 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh; 129 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh;
130 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), 130 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
131 "temp%u_max", offset); 131 "temp%u_max", offset + 1);
132 break; 132 break;
133 case IXGBE_HWMON_TYPE_MAX: 133 case IXGBE_HWMON_TYPE_MAX:
134 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh; 134 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh;
135 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), 135 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
136 "temp%u_crit", offset); 136 "temp%u_crit", offset + 1);
137 break; 137 break;
138 default: 138 default:
139 rc = -EPERM; 139 rc = -EPERM;
@@ -147,32 +147,17 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
147 ixgbe_attr->dev_attr.store = NULL; 147 ixgbe_attr->dev_attr.store = NULL;
148 ixgbe_attr->dev_attr.attr.mode = S_IRUGO; 148 ixgbe_attr->dev_attr.attr.mode = S_IRUGO;
149 ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name; 149 ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name;
150 sysfs_attr_init(&ixgbe_attr->dev_attr.attr);
150 151
151 rc = device_create_file(&adapter->pdev->dev, 152 adapter->ixgbe_hwmon_buff->attrs[n_attr] = &ixgbe_attr->dev_attr.attr;
152 &ixgbe_attr->dev_attr);
153 153
154 if (rc == 0) 154 ++adapter->ixgbe_hwmon_buff->n_hwmon;
155 ++adapter->ixgbe_hwmon_buff.n_hwmon;
156 155
157 return rc; 156 return 0;
158} 157}
159 158
160static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter) 159static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter)
161{ 160{
162 int i;
163
164 if (adapter == NULL)
165 return;
166
167 for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) {
168 device_remove_file(&adapter->pdev->dev,
169 &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr);
170 }
171
172 kfree(adapter->ixgbe_hwmon_buff.hwmon_list);
173
174 if (adapter->ixgbe_hwmon_buff.device)
175 hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device);
176} 161}
177 162
178/* called from ixgbe_main.c */ 163/* called from ixgbe_main.c */
@@ -184,9 +169,9 @@ void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter)
184/* called from ixgbe_main.c */ 169/* called from ixgbe_main.c */
185int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) 170int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
186{ 171{
187 struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff; 172 struct hwmon_buff *ixgbe_hwmon;
173 struct device *hwmon_dev;
188 unsigned int i; 174 unsigned int i;
189 int n_attrs;
190 int rc = 0; 175 int rc = 0;
191 176
192 /* If this method isn't defined we don't support thermals */ 177 /* If this method isn't defined we don't support thermals */
@@ -198,23 +183,13 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
198 if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)) 183 if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw))
199 goto exit; 184 goto exit;
200 185
201 /* 186 ixgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*ixgbe_hwmon),
202 * Allocation space for max attributs 187 GFP_KERNEL);
203 * max num sensors * values (loc, temp, max, caution) 188 if (ixgbe_hwmon == NULL) {
204 */
205 n_attrs = IXGBE_MAX_SENSORS * 4;
206 ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
207 GFP_KERNEL);
208 if (!ixgbe_hwmon->hwmon_list) {
209 rc = -ENOMEM; 189 rc = -ENOMEM;
210 goto err; 190 goto exit;
211 }
212
213 ixgbe_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
214 if (IS_ERR(ixgbe_hwmon->device)) {
215 rc = PTR_ERR(ixgbe_hwmon->device);
216 goto err;
217 } 191 }
192 adapter->ixgbe_hwmon_buff = ixgbe_hwmon;
218 193
219 for (i = 0; i < IXGBE_MAX_SENSORS; i++) { 194 for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
220 /* 195 /*
@@ -226,17 +201,28 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
226 201
227 /* Bail if any hwmon attr struct fails to initialize */ 202 /* Bail if any hwmon attr struct fails to initialize */
228 rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION); 203 rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION);
229 rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
230 rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
231 rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
232 if (rc) 204 if (rc)
233 goto err; 205 goto exit;
206 rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
207 if (rc)
208 goto exit;
209 rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
210 if (rc)
211 goto exit;
212 rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
213 if (rc)
214 goto exit;
234 } 215 }
235 216
236 goto exit; 217 ixgbe_hwmon->groups[0] = &ixgbe_hwmon->group;
218 ixgbe_hwmon->group.attrs = ixgbe_hwmon->attrs;
237 219
238err: 220 hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
239 ixgbe_sysfs_del_adapter(adapter); 221 "ixgbe",
222 ixgbe_hwmon,
223 ixgbe_hwmon->groups);
224 if (IS_ERR(hwmon_dev))
225 rc = PTR_ERR(hwmon_dev);
240exit: 226exit:
241 return rc; 227 return rc;
242} 228}
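The ixgbe sysfs conversion above moves the thermal attributes from device_create_file() on the PCI device to a single attribute group registered with devm_hwmon_device_register_with_groups(), which parents them under the hwmon device and frees them automatically. A condensed sketch with illustrative names (one hard-coded sensor instead of the driver's per-sensor loop):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>

static ssize_t ex_temp1_input_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42000);	/* millidegrees C, dummy value */
}
static DEVICE_ATTR(temp1_input, 0444, ex_temp1_input_show, NULL);

static struct attribute *ex_attrs[] = {
	&dev_attr_temp1_input.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ex);

static int ex_hwmon_init(struct device *parent)
{
	struct device *hwmon_dev;

	/* the hwmon core creates/removes the files; devm_* ties their
	 * lifetime to the parent device, so no explicit unregister is needed
	 */
	hwmon_dev = devm_hwmon_device_register_with_groups(parent, "ixgbe",
							   NULL, ex_groups);
	if (IS_ERR(hwmon_dev))
		return PTR_ERR(hwmon_dev);

	return 0;
}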
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index eaaa3ecacfd7..bb76e96f8278 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -354,6 +354,7 @@ struct ixgbevf_adapter {
354 u32 flags; 354 u32 flags;
355#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1) 355#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
356#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1) 356#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
357#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
357 358
358 /* OS defined structs */ 359 /* OS defined structs */
359 struct net_device *netdev; 360 struct net_device *netdev;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 84b55fe71546..a5d31674ff42 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -95,6 +95,7 @@ module_param(debug, int, 0);
95MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 95MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
96 96
97/* forward decls */ 97/* forward decls */
98static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
98static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 99static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
99static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); 100static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
100 101
@@ -1368,11 +1369,51 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1368 } 1369 }
1369} 1370}
1370 1371
1372static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1373{
1374 struct ixgbe_hw *hw = &adapter->hw;
1375 unsigned int def_q = 0;
1376 unsigned int num_tcs = 0;
1377 unsigned int num_rx_queues = 1;
1378 int err;
1379
1380 spin_lock_bh(&adapter->mbx_lock);
1381
1382 /* fetch queue configuration from the PF */
1383 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1384
1385 spin_unlock_bh(&adapter->mbx_lock);
1386
1387 if (err)
1388 return err;
1389
1390 if (num_tcs > 1) {
1391 /* update default Tx ring register index */
1392 adapter->tx_ring[0].reg_idx = def_q;
1393
1394 /* we need as many queues as traffic classes */
1395 num_rx_queues = num_tcs;
1396 }
1397
1398 /* if we have a bad config abort request queue reset */
1399 if (adapter->num_rx_queues != num_rx_queues) {
1400 /* force mailbox timeout to prevent further messages */
1401 hw->mbx.timeout = 0;
1402
1403 /* wait for watchdog to come around and bail us out */
1404 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
1405 }
1406
1407 return 0;
1408}
1409
1371static void ixgbevf_configure(struct ixgbevf_adapter *adapter) 1410static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1372{ 1411{
1373 struct net_device *netdev = adapter->netdev; 1412 struct net_device *netdev = adapter->netdev;
1374 int i; 1413 int i;
1375 1414
1415 ixgbevf_configure_dcb(adapter);
1416
1376 ixgbevf_set_rx_mode(netdev); 1417 ixgbevf_set_rx_mode(netdev);
1377 1418
1378 ixgbevf_restore_vlan(adapter); 1419 ixgbevf_restore_vlan(adapter);
@@ -1551,85 +1592,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1551 mod_timer(&adapter->watchdog_timer, jiffies); 1592 mod_timer(&adapter->watchdog_timer, jiffies);
1552} 1593}
1553 1594
1554static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
1555{
1556 struct ixgbe_hw *hw = &adapter->hw;
1557 struct ixgbevf_ring *rx_ring;
1558 unsigned int def_q = 0;
1559 unsigned int num_tcs = 0;
1560 unsigned int num_rx_queues = 1;
1561 int err, i;
1562
1563 spin_lock_bh(&adapter->mbx_lock);
1564
1565 /* fetch queue configuration from the PF */
1566 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1567
1568 spin_unlock_bh(&adapter->mbx_lock);
1569
1570 if (err)
1571 return err;
1572
1573 if (num_tcs > 1) {
1574 /* update default Tx ring register index */
1575 adapter->tx_ring[0].reg_idx = def_q;
1576
1577 /* we need as many queues as traffic classes */
1578 num_rx_queues = num_tcs;
1579 }
1580
1581 /* nothing to do if we have the correct number of queues */
1582 if (adapter->num_rx_queues == num_rx_queues)
1583 return 0;
1584
1585 /* allocate new rings */
1586 rx_ring = kcalloc(num_rx_queues,
1587 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1588 if (!rx_ring)
1589 return -ENOMEM;
1590
1591 /* setup ring fields */
1592 for (i = 0; i < num_rx_queues; i++) {
1593 rx_ring[i].count = adapter->rx_ring_count;
1594 rx_ring[i].queue_index = i;
1595 rx_ring[i].reg_idx = i;
1596 rx_ring[i].dev = &adapter->pdev->dev;
1597 rx_ring[i].netdev = adapter->netdev;
1598
1599 /* allocate resources on the ring */
1600 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
1601 if (err) {
1602 while (i) {
1603 i--;
1604 ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
1605 }
1606 kfree(rx_ring);
1607 return err;
1608 }
1609 }
1610
1611 /* free the existing rings and queues */
1612 ixgbevf_free_all_rx_resources(adapter);
1613 adapter->num_rx_queues = 0;
1614 kfree(adapter->rx_ring);
1615
1616 /* move new rings into position on the adapter struct */
1617 adapter->rx_ring = rx_ring;
1618 adapter->num_rx_queues = num_rx_queues;
1619
1620 /* reset ring to vector mapping */
1621 ixgbevf_reset_q_vectors(adapter);
1622 ixgbevf_map_rings_to_vectors(adapter);
1623
1624 return 0;
1625}
1626
1627void ixgbevf_up(struct ixgbevf_adapter *adapter) 1595void ixgbevf_up(struct ixgbevf_adapter *adapter)
1628{ 1596{
1629 struct ixgbe_hw *hw = &adapter->hw; 1597 struct ixgbe_hw *hw = &adapter->hw;
1630 1598
1631 ixgbevf_reset_queues(adapter);
1632
1633 ixgbevf_configure(adapter); 1599 ixgbevf_configure(adapter);
1634 1600
1635 ixgbevf_up_complete(adapter); 1601 ixgbevf_up_complete(adapter);
@@ -1875,9 +1841,28 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1875 **/ 1841 **/
1876static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) 1842static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1877{ 1843{
1844 struct ixgbe_hw *hw = &adapter->hw;
1845 unsigned int def_q = 0;
1846 unsigned int num_tcs = 0;
1847 int err;
1848
1878 /* Start with base case */ 1849 /* Start with base case */
1879 adapter->num_rx_queues = 1; 1850 adapter->num_rx_queues = 1;
1880 adapter->num_tx_queues = 1; 1851 adapter->num_tx_queues = 1;
1852
1853 spin_lock_bh(&adapter->mbx_lock);
1854
1855 /* fetch queue configuration from the PF */
1856 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1857
1858 spin_unlock_bh(&adapter->mbx_lock);
1859
1860 if (err)
1861 return;
1862
1863 /* we need as many queues as traffic classes */
1864 if (num_tcs > 1)
1865 adapter->num_rx_queues = num_tcs;
1881} 1866}
1882 1867
1883/** 1868/**
@@ -2326,6 +2311,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2326 bool link_up = adapter->link_up; 2311 bool link_up = adapter->link_up;
2327 s32 need_reset; 2312 s32 need_reset;
2328 2313
2314 ixgbevf_queue_reset_subtask(adapter);
2315
2329 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2316 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2330 2317
2331 /* 2318 /*
@@ -2595,63 +2582,6 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2595 &adapter->rx_ring[i]); 2582 &adapter->rx_ring[i]);
2596} 2583}
2597 2584
2598static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
2599{
2600 struct ixgbe_hw *hw = &adapter->hw;
2601 struct ixgbevf_ring *rx_ring;
2602 unsigned int def_q = 0;
2603 unsigned int num_tcs = 0;
2604 unsigned int num_rx_queues = 1;
2605 int err, i;
2606
2607 spin_lock_bh(&adapter->mbx_lock);
2608
2609 /* fetch queue configuration from the PF */
2610 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2611
2612 spin_unlock_bh(&adapter->mbx_lock);
2613
2614 if (err)
2615 return err;
2616
2617 if (num_tcs > 1) {
2618 /* update default Tx ring register index */
2619 adapter->tx_ring[0].reg_idx = def_q;
2620
2621 /* we need as many queues as traffic classes */
2622 num_rx_queues = num_tcs;
2623 }
2624
2625 /* nothing to do if we have the correct number of queues */
2626 if (adapter->num_rx_queues == num_rx_queues)
2627 return 0;
2628
2629 /* allocate new rings */
2630 rx_ring = kcalloc(num_rx_queues,
2631 sizeof(struct ixgbevf_ring), GFP_KERNEL);
2632 if (!rx_ring)
2633 return -ENOMEM;
2634
2635 /* setup ring fields */
2636 for (i = 0; i < num_rx_queues; i++) {
2637 rx_ring[i].count = adapter->rx_ring_count;
2638 rx_ring[i].queue_index = i;
2639 rx_ring[i].reg_idx = i;
2640 rx_ring[i].dev = &adapter->pdev->dev;
2641 rx_ring[i].netdev = adapter->netdev;
2642 }
2643
2644 /* free the existing ring and queues */
2645 adapter->num_rx_queues = 0;
2646 kfree(adapter->rx_ring);
2647
2648 /* move new rings into position on the adapter struct */
2649 adapter->rx_ring = rx_ring;
2650 adapter->num_rx_queues = num_rx_queues;
2651
2652 return 0;
2653}
2654
2655/** 2585/**
2656 * ixgbevf_open - Called when a network interface is made active 2586 * ixgbevf_open - Called when a network interface is made active
2657 * @netdev: network interface device structure 2587 * @netdev: network interface device structure
@@ -2695,11 +2625,6 @@ static int ixgbevf_open(struct net_device *netdev)
2695 } 2625 }
2696 } 2626 }
2697 2627
2698 /* setup queue reg_idx and Rx queue count */
2699 err = ixgbevf_setup_queues(adapter);
2700 if (err)
2701 goto err_setup_queues;
2702
2703 /* allocate transmit descriptors */ 2628 /* allocate transmit descriptors */
2704 err = ixgbevf_setup_all_tx_resources(adapter); 2629 err = ixgbevf_setup_all_tx_resources(adapter);
2705 if (err) 2630 if (err)
@@ -2737,7 +2662,6 @@ err_setup_rx:
2737 ixgbevf_free_all_rx_resources(adapter); 2662 ixgbevf_free_all_rx_resources(adapter);
2738err_setup_tx: 2663err_setup_tx:
2739 ixgbevf_free_all_tx_resources(adapter); 2664 ixgbevf_free_all_tx_resources(adapter);
2740err_setup_queues:
2741 ixgbevf_reset(adapter); 2665 ixgbevf_reset(adapter);
2742 2666
2743err_setup_reset: 2667err_setup_reset:
@@ -2769,6 +2693,34 @@ static int ixgbevf_close(struct net_device *netdev)
2769 return 0; 2693 return 0;
2770} 2694}
2771 2695
2696static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
2697{
2698 struct net_device *dev = adapter->netdev;
2699
2700 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
2701 return;
2702
2703 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
2704
2705 /* if interface is down do nothing */
2706 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2707 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2708 return;
2709
2710 /* Hardware has to reinitialize queues and interrupts to
2711 * match packet buffer alignment. Unfortunately, the
2712 * hardware is not flexible enough to do this dynamically.
2713 */
2714 if (netif_running(dev))
2715 ixgbevf_close(dev);
2716
2717 ixgbevf_clear_interrupt_scheme(adapter);
2718 ixgbevf_init_interrupt_scheme(adapter);
2719
2720 if (netif_running(dev))
2721 ixgbevf_open(dev);
2722}
2723
2772static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, 2724static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2773 u32 vlan_macip_lens, u32 type_tucmd, 2725 u32 vlan_macip_lens, u32 type_tucmd,
2774 u32 mss_l4len_idx) 2726 u32 mss_l4len_idx)
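The ixgbevf changes above fold the queue setup into ixgbevf_configure_dcb() and defer any required rebuild to the watchdog via a "queue reset requested" flag, because the hardware cannot regroup queues and interrupts on the fly. A stripped-down sketch of that request/service split, with illustrative names rather than the real ixgbevf structures:

#define EX_FLAG_QUEUE_RESET_REQUESTED	(1u << 2)

struct ex_vf_adapter {
	unsigned int flags;
	unsigned int num_rx_queues;
};

/* configure path: only note that a rebuild is needed */
static void ex_request_queue_reset(struct ex_vf_adapter *adapter,
				   unsigned int wanted_rx_queues)
{
	if (adapter->num_rx_queues != wanted_rx_queues)
		adapter->flags |= EX_FLAG_QUEUE_RESET_REQUESTED;
}

/* watchdog path: service the request outside the configure path */
static void ex_queue_reset_subtask(struct ex_vf_adapter *adapter)
{
	if (!(adapter->flags & EX_FLAG_QUEUE_RESET_REQUESTED))
		return;

	adapter->flags &= ~EX_FLAG_QUEUE_RESET_REQUESTED;

	/* the driver would close the netdev, rebuild the interrupt/queue
	 * scheme, and reopen it here
	 */
}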