Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/ixgbe.h          |    3
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c    |    8
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c    |  418
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c   |  539
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h   |   22
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c   |    1
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c  |  146
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c     |   17
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c     |  656
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.c      |   42
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.h      |    3
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c    |  137
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.h    |    8
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h     |   50
14 files changed, 1398 insertions(+), 652 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 79c35ae3718c..d0ea3d6dea95 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -111,7 +111,10 @@ struct vf_data_storage {
 	u16 default_vf_vlan_id;
 	u16 vlans_enabled;
 	bool clear_to_send;
+	bool pf_set_mac;
 	int rar;
+	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+	u16 pf_qos;
 };
 
 /* wrapper around a pointer to a socket buffer,
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 35a06b47587b..f2b7ff44215b 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -42,9 +42,9 @@ static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
                                                   ixgbe_link_speed *speed,
                                                   bool *autoneg);
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                          ixgbe_link_speed speed,
                                          bool autoneg,
                                          bool autoneg_wait_to_complete);
 static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                                        u8 *eeprom_data);
 
@@ -1221,7 +1221,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
 
 static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
 	.init_params = &ixgbe_init_eeprom_params_generic,
-	.read = &ixgbe_read_eeprom_generic,
+	.read = &ixgbe_read_eerd_generic,
 	.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
 	.update_checksum = &ixgbe_update_eeprom_checksum_generic,
 };
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 12fc0e7ba2ca..e9706eb8e4ff 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -133,27 +133,6 @@ setup_sfp_out:
 	return ret_val;
 }
 
-/**
- *  ixgbe_get_pcie_msix_count_82599 - Gets MSI-X vector count
- *  @hw: pointer to hardware structure
- *
- *  Read PCIe configuration space, and get the MSI-X vector count from
- *  the capabilities table.
- **/
-static u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw)
-{
-	struct ixgbe_adapter *adapter = hw->back;
-	u16 msix_count;
-	pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
-	                     &msix_count);
-	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
-	/* MSI-X count is zero-based in HW, so increment to give proper value */
-	msix_count++;
-
-	return msix_count;
-}
-
 static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
@@ -165,7 +144,7 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
 	mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
 	mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
 	mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
-	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
+	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
 
 	return 0;
 }
@@ -642,6 +621,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 	s32 i, j;
 	bool link_up = false;
 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	struct ixgbe_adapter *adapter = hw->back;
 
 	hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
 
@@ -726,64 +706,14 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 	                                      autoneg_wait_to_complete);
 
 out:
+	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+		netif_info(adapter, hw, adapter->netdev, "Smartspeed has"
+			" downgraded the link speed from the maximum"
+			" advertised\n");
 	return status;
 }
 
 /**
- *  ixgbe_check_mac_link_82599 - Determine link and speed status
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @link_up: true when link is up
- *  @link_up_wait_to_complete: bool used to wait for link up or not
- *
- *  Reads the links register to determine if link is up and the current speed
- **/
-static s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw,
-                                      ixgbe_link_speed *speed,
-                                      bool *link_up,
-                                      bool link_up_wait_to_complete)
-{
-	u32 links_reg;
-	u32 i;
-
-	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-	if (link_up_wait_to_complete) {
-		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
-			if (links_reg & IXGBE_LINKS_UP) {
-				*link_up = true;
-				break;
-			} else {
-				*link_up = false;
-			}
-			msleep(100);
-			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
-		}
-	} else {
-		if (links_reg & IXGBE_LINKS_UP)
-			*link_up = true;
-		else
-			*link_up = false;
-	}
-
-	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
-	    IXGBE_LINKS_SPEED_10G_82599)
-		*speed = IXGBE_LINK_SPEED_10GB_FULL;
-	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
-	         IXGBE_LINKS_SPEED_1G_82599)
-		*speed = IXGBE_LINK_SPEED_1GB_FULL;
-	else
-		*speed = IXGBE_LINK_SPEED_100_FULL;
-
-	/* if link is down, zero out the current_mode */
-	if (*link_up == false) {
-		hw->fc.current_mode = ixgbe_fc_none;
-		hw->fc.fc_was_autonegged = false;
-	}
-
-	return 0;
-}
-
-/**
  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
@@ -1045,243 +975,6 @@ reset_hw_out:
 }
 
 /**
- *  ixgbe_clear_vmdq_82599 - Disassociate a VMDq pool index from a rx address
- *  @hw: pointer to hardware struct
- *  @rar: receive address register index to disassociate
- *  @vmdq: VMDq pool index to remove from the rar
- **/
-static s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
-	u32 mpsar_lo, mpsar_hi;
-	u32 rar_entries = hw->mac.num_rar_entries;
-
-	if (rar < rar_entries) {
-		mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-		mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-
-		if (!mpsar_lo && !mpsar_hi)
-			goto done;
-
-		if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
-			if (mpsar_lo) {
-				IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
-				mpsar_lo = 0;
-			}
-			if (mpsar_hi) {
-				IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
-				mpsar_hi = 0;
-			}
-		} else if (vmdq < 32) {
-			mpsar_lo &= ~(1 << vmdq);
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
-		} else {
-			mpsar_hi &= ~(1 << (vmdq - 32));
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
-		}
-
-		/* was that the last pool using this rar? */
-		if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
-			hw->mac.ops.clear_rar(hw, rar);
-	} else {
-		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
-	}
-
-done:
-	return 0;
-}
-
-/**
- *  ixgbe_set_vmdq_82599 - Associate a VMDq pool index with a rx address
- *  @hw: pointer to hardware struct
- *  @rar: receive address register index to associate with a VMDq index
- *  @vmdq: VMDq pool index
- **/
-static s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
-	u32 mpsar;
-	u32 rar_entries = hw->mac.num_rar_entries;
-
-	if (rar < rar_entries) {
-		if (vmdq < 32) {
-			mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-			mpsar |= 1 << vmdq;
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
-		} else {
-			mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-			mpsar |= 1 << (vmdq - 32);
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
-		}
-	} else {
-		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
-	}
-	return 0;
-}
-
-/**
- *  ixgbe_set_vfta_82599 - Set VLAN filter table
- *  @hw: pointer to hardware structure
- *  @vlan: VLAN id to write to VLAN filter
- *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
- *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
- *
- *  Turn on/off specified VLAN in the VLAN filter table.
- **/
-static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-                                bool vlan_on)
-{
-	u32 regindex;
-	u32 vlvf_index;
-	u32 bitindex;
-	u32 bits;
-	u32 first_empty_slot;
-	u32 vt_ctl;
-
-	if (vlan > 4095)
-		return IXGBE_ERR_PARAM;
-
-	/*
-	 * this is a 2 part operation - first the VFTA, then the
-	 * VLVF and VLVFB if vind is set
-	 */
-
-	/* Part 1
-	 * The VFTA is a bitstring made up of 128 32-bit registers
-	 * that enable the particular VLAN id, much like the MTA:
-	 *    bits[11-5]: which register
-	 *    bits[4-0]:  which bit in the register
-	 */
-	regindex = (vlan >> 5) & 0x7F;
-	bitindex = vlan & 0x1F;
-	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
-	if (vlan_on)
-		bits |= (1 << bitindex);
-	else
-		bits &= ~(1 << bitindex);
-	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
-
-
-	/* Part 2
-	 * If VT mode is set
-	 *   Either vlan_on
-	 *     make sure the vlan is in VLVF
-	 *     set the vind bit in the matching VLVFB
-	 *   Or !vlan_on
-	 *     clear the pool bit and possibly the vind
-	 */
-	vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
-	if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
-		goto out;
-
-	/* find the vlanid or the first empty slot */
-	first_empty_slot = 0;
-
-	for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
-		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
-		if (!bits && !first_empty_slot)
-			first_empty_slot = vlvf_index;
-		else if ((bits & 0x0FFF) == vlan)
-			break;
-	}
-
-	if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
-		if (first_empty_slot)
-			vlvf_index = first_empty_slot;
-		else {
-			hw_dbg(hw, "No space in VLVF.\n");
-			goto out;
-		}
-	}
-
-	if (vlan_on) {
-		/* set the pool bit */
-		if (vind < 32) {
-			bits = IXGBE_READ_REG(hw,
-					IXGBE_VLVFB(vlvf_index * 2));
-			bits |= (1 << vind);
-			IXGBE_WRITE_REG(hw,
-					IXGBE_VLVFB(vlvf_index * 2), bits);
-		} else {
-			bits = IXGBE_READ_REG(hw,
-					IXGBE_VLVFB((vlvf_index * 2) + 1));
-			bits |= (1 << (vind - 32));
-			IXGBE_WRITE_REG(hw,
-					IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
-		}
-	} else {
-		/* clear the pool bit */
-		if (vind < 32) {
-			bits = IXGBE_READ_REG(hw,
-					IXGBE_VLVFB(vlvf_index * 2));
-			bits &= ~(1 << vind);
-			IXGBE_WRITE_REG(hw,
-					IXGBE_VLVFB(vlvf_index * 2), bits);
-			bits |= IXGBE_READ_REG(hw,
-					IXGBE_VLVFB((vlvf_index * 2) + 1));
-		} else {
-			bits = IXGBE_READ_REG(hw,
-					IXGBE_VLVFB((vlvf_index * 2) + 1));
-			bits &= ~(1 << (vind - 32));
-			IXGBE_WRITE_REG(hw,
-					IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
-			bits |= IXGBE_READ_REG(hw,
-					IXGBE_VLVFB(vlvf_index * 2));
-		}
-	}
-
-	if (bits) {
-		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
-				(IXGBE_VLVF_VIEN | vlan));
-		/* if bits is non-zero then some pools/VFs are still
-		 * using this VLAN ID.  Force the VFTA entry to on */
-		bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
-		bits |= (1 << bitindex);
-		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
-	}
-	else
-		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
-
-out:
-	return 0;
-}
-
-/**
- *  ixgbe_clear_vfta_82599 - Clear VLAN filter table
- *  @hw: pointer to hardware structure
- *
- *  Clears the VLAN filer table, and the VMDq index associated with the filter
- **/
-static s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
-{
-	u32 offset;
-
-	for (offset = 0; offset < hw->mac.vft_size; offset++)
-		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
-
-	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
-		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
-	}
-
-	return 0;
-}
-
-/**
- *  ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array
- *  @hw: pointer to hardware structure
- **/
-static s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
-{
-	int i;
-	hw_dbg(hw, " Clearing UTA\n");
-
-	for (i = 0; i < 128; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
-
-	return 0;
-}
-
-/**
  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
  *  @hw: pointer to hardware structure
  **/
@@ -1303,7 +996,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
 	}
 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
 		hw_dbg(hw ,"Flow Director previous command isn't complete, "
-		       "aborting table re-initialization. \n");
+		       "aborting table re-initialization.\n");
 		return IXGBE_ERR_FDIR_REINIT_FAILED;
 	}
 
@@ -2462,10 +2155,14 @@ sfp_check:
 		goto out;
 
 	switch (hw->phy.type) {
-	case ixgbe_phy_tw_tyco:
-	case ixgbe_phy_tw_unknown:
+	case ixgbe_phy_sfp_passive_tyco:
+	case ixgbe_phy_sfp_passive_unknown:
 		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
 		break;
+	case ixgbe_phy_sfp_ftl_active:
+	case ixgbe_phy_sfp_active_unknown:
+		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
+		break;
 	case ixgbe_phy_sfp_avago:
 	case ixgbe_phy_sfp_ftl:
 	case ixgbe_phy_sfp_intel:
@@ -2545,75 +2242,6 @@ static s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
 }
 
 /**
- *  ixgbe_get_san_mac_addr_offset_82599 - SAN MAC address offset for 82599
- *  @hw: pointer to hardware structure
- *  @san_mac_offset: SAN MAC address offset
- *
- *  This function will read the EEPROM location for the SAN MAC address
- *  pointer, and returns the value at that location.  This is used in both
- *  get and set mac_addr routines.
- **/
-static s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
-                                               u16 *san_mac_offset)
-{
-	/*
-	 * First read the EEPROM pointer to see if the MAC addresses are
-	 * available.
-	 */
-	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
-
-	return 0;
-}
-
-/**
- *  ixgbe_get_san_mac_addr_82599 - SAN MAC address retrieval for 82599
- *  @hw: pointer to hardware structure
- *  @san_mac_addr: SAN MAC address
- *
- *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
- *  per-port, so set_lan_id() must be called before reading the addresses.
- *  set_lan_id() is called by identify_sfp(), but this cannot be relied
- *  upon for non-SFP connections, so we must call it here.
- **/
-static s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
-	u16 san_mac_data, san_mac_offset;
-	u8 i;
-
-	/*
-	 * First read the EEPROM pointer to see if the MAC addresses are
-	 * available.  If they're not, no point in calling set_lan_id() here.
-	 */
-	ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);
-
-	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
-		/*
-		 * No addresses available in this EEPROM.  It's not an
-		 * error though, so just wipe the local address and return.
-		 */
-		for (i = 0; i < 6; i++)
-			san_mac_addr[i] = 0xFF;
-
-		goto san_mac_addr_out;
-	}
-
-	/* make sure we know which port we need to program */
-	hw->mac.ops.set_lan_id(hw);
-	/* apply the port offset to the address offset */
-	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
-	                 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
-	for (i = 0; i < 3; i++) {
-		hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
-		san_mac_addr[i * 2] = (u8)(san_mac_data);
-		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
-		san_mac_offset++;
-	}
-
-san_mac_addr_out:
-	return 0;
-}
-
-/**
  *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
  *  @hw: pointer to hardware structure
  *
@@ -2715,7 +2343,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
 	.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
 	.enable_rx_dma = &ixgbe_enable_rx_dma_82599,
 	.get_mac_addr = &ixgbe_get_mac_addr_generic,
-	.get_san_mac_addr = &ixgbe_get_san_mac_addr_82599,
+	.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
 	.get_device_caps = &ixgbe_get_device_caps_82599,
 	.get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
 	.stop_adapter = &ixgbe_stop_adapter_generic,
@@ -2724,7 +2352,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
 	.read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
 	.write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
 	.setup_link = &ixgbe_setup_mac_link_82599,
-	.check_link = &ixgbe_check_mac_link_82599,
+	.check_link = &ixgbe_check_mac_link_generic,
 	.get_link_capabilities = &ixgbe_get_link_capabilities_82599,
 	.led_on = &ixgbe_led_on_generic,
 	.led_off = &ixgbe_led_off_generic,
@@ -2732,23 +2360,23 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
 	.blink_led_stop = &ixgbe_blink_led_stop_generic,
 	.set_rar = &ixgbe_set_rar_generic,
 	.clear_rar = &ixgbe_clear_rar_generic,
-	.set_vmdq = &ixgbe_set_vmdq_82599,
-	.clear_vmdq = &ixgbe_clear_vmdq_82599,
+	.set_vmdq = &ixgbe_set_vmdq_generic,
+	.clear_vmdq = &ixgbe_clear_vmdq_generic,
 	.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
 	.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
 	.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
 	.enable_mc = &ixgbe_enable_mc_generic,
 	.disable_mc = &ixgbe_disable_mc_generic,
-	.clear_vfta = &ixgbe_clear_vfta_82599,
-	.set_vfta = &ixgbe_set_vfta_82599,
+	.clear_vfta = &ixgbe_clear_vfta_generic,
+	.set_vfta = &ixgbe_set_vfta_generic,
 	.fc_enable = &ixgbe_fc_enable_generic,
-	.init_uta_tables = &ixgbe_init_uta_tables_82599,
+	.init_uta_tables = &ixgbe_init_uta_tables_generic,
 	.setup_sfp = &ixgbe_setup_sfp_modules_82599,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
 	.init_params = &ixgbe_init_eeprom_params_generic,
-	.read = &ixgbe_read_eeprom_generic,
+	.read = &ixgbe_read_eerd_generic,
 	.write = &ixgbe_write_eeprom_generic,
 	.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
 	.update_checksum = &ixgbe_update_eeprom_checksum_generic,
@@ -2757,7 +2385,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
 static struct ixgbe_phy_operations phy_ops_82599 = {
 	.identify = &ixgbe_identify_phy_82599,
 	.identify_sfp = &ixgbe_identify_sfp_module_generic,
 	.init = &ixgbe_init_phy_ops_82599,
 	.reset = &ixgbe_reset_phy_generic,
 	.read_reg = &ixgbe_read_phy_reg_generic,
 	.write_reg = &ixgbe_write_phy_reg_generic,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index eb49020903c1..1159d9138f05 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -34,7 +34,6 @@
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
-static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
@@ -595,14 +594,14 @@ out:
 }
 
 /**
- *  ixgbe_read_eeprom_generic - Read EEPROM word using EERD
+ *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
  *  @hw: pointer to hardware structure
  *  @offset: offset of word in the EEPROM to read
  *  @data: word read from the EEPROM
  *
  *  Reads a 16 bit word from the EEPROM using the EERD register.
  **/
-s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
 {
 	u32 eerd;
 	s32 status;
@@ -614,15 +613,15 @@ s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
 		goto out;
 	}
 
-	eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) +
-	       IXGBE_EEPROM_READ_REG_START;
+	eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
+	       IXGBE_EEPROM_RW_REG_START;
 
 	IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
-	status = ixgbe_poll_eeprom_eerd_done(hw);
+	status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
 
 	if (status == 0)
 		*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
-		         IXGBE_EEPROM_READ_REG_DATA);
+		         IXGBE_EEPROM_RW_REG_DATA);
 	else
 		hw_dbg(hw, "Eeprom read timed out\n");
 
@@ -631,20 +630,26 @@ out:
 }
 
 /**
- *  ixgbe_poll_eeprom_eerd_done - Poll EERD status
+ *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
  *  @hw: pointer to hardware structure
+ *  @ee_reg: EEPROM flag for polling
  *
- *  Polls the status bit (bit 1) of the EERD to determine when the read is done.
+ *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
+ *  read or write is done respectively.
  **/
-static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw)
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
 {
 	u32 i;
 	u32 reg;
 	s32 status = IXGBE_ERR_EEPROM;
 
-	for (i = 0; i < IXGBE_EERD_ATTEMPTS; i++) {
-		reg = IXGBE_READ_REG(hw, IXGBE_EERD);
-		if (reg & IXGBE_EEPROM_READ_REG_DONE) {
+	for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
+		if (ee_reg == IXGBE_NVM_POLL_READ)
+			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
+		else
+			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
+
+		if (reg & IXGBE_EEPROM_RW_REG_DONE) {
 			status = 0;
 			break;
 		}
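The change above folds the EERD-only poll loop into a single helper that can watch either NVM register, selected by a flag. A minimal userspace sketch of the same control flow follows; the NVM_POLL_*/RW_REG_DONE names and the fake_* register stand-ins are illustrative, not the driver's symbols.

    #include <stdint.h>
    #include <stdio.h>

    #define NVM_POLL_READ  0          /* poll the EERD-style read status  */
    #define NVM_POLL_WRITE 1          /* poll the EEWR-style write status */
    #define RW_REG_DONE    (1u << 1)  /* bit 1 signals completion         */
    #define MAX_ATTEMPTS   100000

    /* Stand-ins for the two hardware registers; the driver reads them with
     * IXGBE_READ_REG(hw, IXGBE_EERD) / IXGBE_READ_REG(hw, IXGBE_EEWR). */
    static uint32_t fake_eerd = RW_REG_DONE, fake_eewr = RW_REG_DONE;

    /* One helper polls either register, chosen by a flag, which is the same
     * shape as the new ixgbe_poll_eerd_eewr_done(). */
    static int poll_done(int ee_reg)
    {
            unsigned int i;

            for (i = 0; i < MAX_ATTEMPTS; i++) {
                    uint32_t reg = (ee_reg == NVM_POLL_READ) ? fake_eerd
                                                             : fake_eewr;
                    if (reg & RW_REG_DONE)
                            return 0;
            }
            return -1;  /* timed out */
    }

    int main(void)
    {
            printf("read poll: %d, write poll: %d\n",
                   poll_done(NVM_POLL_READ), poll_done(NVM_POLL_WRITE));
            return 0;
    }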
@@ -1392,14 +1397,17 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 			fctrl |= IXGBE_FCTRL_UPE;
 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+			hw->addr_ctrl.uc_set_promisc = true;
 		}
 	} else {
 		/* only disable if set by overflow, not by user */
-		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+		if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
+		    !(hw->addr_ctrl.user_set_promisc)) {
 			hw_dbg(hw, " Leaving address overflow promisc mode\n");
 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 			fctrl &= ~IXGBE_FCTRL_UPE;
 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+			hw->addr_ctrl.uc_set_promisc = false;
 		}
 	}
 
@@ -1484,26 +1492,24 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
 /**
  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
  *  @hw: pointer to hardware structure
- *  @mc_addr_list: the list of new multicast addresses
- *  @mc_addr_count: number of addresses
- *  @next: iterator function to walk the multicast address list
+ *  @netdev: pointer to net device structure
  *
  *  The given list replaces any existing list. Clears the MC addrs from receive
  *  address registers and the multicast table. Uses unused receive address
  *  registers for the first multicast addresses, and hashes the rest into the
  *  multicast table.
  **/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
-                                      u32 mc_addr_count, ixgbe_mc_addr_itr next)
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+                                      struct net_device *netdev)
 {
+	struct netdev_hw_addr *ha;
 	u32 i;
-	u32 vmdq;
 
 	/*
 	 * Set the new number of MC addresses that we are being requested to
 	 * use.
 	 */
-	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+	hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
 	hw->addr_ctrl.mta_in_use = 0;
 
 	/* Clear the MTA */
@@ -1512,9 +1518,9 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
 
 	/* Add the new addresses */
-	for (i = 0; i < mc_addr_count; i++) {
+	netdev_for_each_mc_addr(ha, netdev) {
 		hw_dbg(hw, " Adding the multicast addresses:\n");
-		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+		ixgbe_set_mta(hw, ha->addr);
 	}
 
 	/* Enable mta */
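The rewrite above drops the driver-private iterator callback (ixgbe_mc_addr_itr) in favor of walking the net device's own multicast list with netdev_for_each_mc_addr(). A minimal sketch of that pattern with a hand-rolled list; struct hw_addr and for_each_mc_addr here are stand-ins for the kernel's netdev_hw_addr machinery, not its actual definitions.

    #include <stdio.h>

    struct hw_addr {
            unsigned char addr[6];
            struct hw_addr *next;
    };

    /* Walk the address list directly, as netdev_for_each_mc_addr() does,
     * instead of calling back into a separately supplied iterator. */
    #define for_each_mc_addr(ha, head) \
            for ((ha) = (head); (ha); (ha) = (ha)->next)

    static void set_mta(const unsigned char *mac)
    {
            printf("hashing %02x:%02x:%02x:...\n", mac[0], mac[1], mac[2]);
    }

    int main(void)
    {
            struct hw_addr b = { { 0x01, 0x00, 0x5e, 0, 0, 2 }, NULL };
            struct hw_addr a = { { 0x01, 0x00, 0x5e, 0, 0, 1 }, &b };
            struct hw_addr *ha;

            for_each_mc_addr(ha, &a)
                    set_mta(ha->addr);
            return 0;
    }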
@@ -2254,3 +2260,490 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
 
 	return 0;
 }
+
+/**
+ *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @san_mac_offset: SAN MAC address offset
+ *
+ *  This function will read the EEPROM location for the SAN MAC address
+ *  pointer, and returns the value at that location.  This is used in both
+ *  get and set mac_addr routines.
+ **/
+static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+                                         u16 *san_mac_offset)
+{
+	/*
+	 * First read the EEPROM pointer to see if the MAC addresses are
+	 * available.
+	 */
+	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: SAN MAC address
+ *
+ *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
+ *  per-port, so set_lan_id() must be called before reading the addresses.
+ *  set_lan_id() is called by identify_sfp(), but this cannot be relied
+ *  upon for non-SFP connections, so we must call it here.
+ **/
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+	u16 san_mac_data, san_mac_offset;
+	u8 i;
+
+	/*
+	 * First read the EEPROM pointer to see if the MAC addresses are
+	 * available.  If they're not, no point in calling set_lan_id() here.
+	 */
+	ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+
+	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
+		/*
+		 * No addresses available in this EEPROM.  It's not an
+		 * error though, so just wipe the local address and return.
+		 */
+		for (i = 0; i < 6; i++)
+			san_mac_addr[i] = 0xFF;
+
+		goto san_mac_addr_out;
+	}
+
+	/* make sure we know which port we need to program */
+	hw->mac.ops.set_lan_id(hw);
+	/* apply the port offset to the address offset */
+	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+	                 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+	for (i = 0; i < 3; i++) {
+		hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+		san_mac_addr[i * 2] = (u8)(san_mac_data);
+		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+		san_mac_offset++;
+	}
+
+san_mac_addr_out:
+	return 0;
+}
+
+/**
+ *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
+ *  @hw: pointer to hardware structure
+ *
+ *  Read PCIe configuration space, and get the MSI-X vector count from
+ *  the capabilities table.
+ **/
+u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+{
+	struct ixgbe_adapter *adapter = hw->back;
+	u16 msix_count;
+	pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
+	                     &msix_count);
+	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+	/* MSI-X count is zero-based in HW, so increment to give proper value */
+	msix_count++;
+
+	return msix_count;
+}
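The table-size field that ixgbe_get_pcie_msix_count_generic() masks out of PCI config space is zero-based, hence the increment. A small standalone check of that decode; the 0x7FF mask mirrors the 11-bit field from the PCI MSI-X capability and is an assumption here, not quoted from the driver's headers.

    #include <stdint.h>
    #include <stdio.h>

    #define MSIX_TBL_SZ_MASK 0x7FF  /* table-size field, bits 10:0 */

    int main(void)
    {
            /* A raw field value of 63 means 64 usable vectors. */
            uint16_t raw = 0x003F;
            uint16_t vectors = (raw & MSIX_TBL_SZ_MASK) + 1;

            printf("raw 0x%04x -> %u MSI-X vectors\n", raw, vectors);
            return 0;
    }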
+
+/**
+ *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to disassociate
+ *  @vmdq: VMDq pool index to remove from the rar
+ **/
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	u32 mpsar_lo, mpsar_hi;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	if (rar < rar_entries) {
+		mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+		mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+
+		if (!mpsar_lo && !mpsar_hi)
+			goto done;
+
+		if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+			if (mpsar_lo) {
+				IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+				mpsar_lo = 0;
+			}
+			if (mpsar_hi) {
+				IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+				mpsar_hi = 0;
+			}
+		} else if (vmdq < 32) {
+			mpsar_lo &= ~(1 << vmdq);
+			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
+		} else {
+			mpsar_hi &= ~(1 << (vmdq - 32));
+			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
+		}
+
+		/* was that the last pool using this rar? */
+		if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+			hw->mac.ops.clear_rar(hw, rar);
+	} else {
+		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+	}
+
+done:
+	return 0;
+}
+
+/**
+ *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+	u32 mpsar;
+	u32 rar_entries = hw->mac.num_rar_entries;
+
+	if (rar < rar_entries) {
+		if (vmdq < 32) {
+			mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+			mpsar |= 1 << vmdq;
+			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+		} else {
+			mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+			mpsar |= 1 << (vmdq - 32);
+			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
+		}
+	} else {
+		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+	}
+	return 0;
+}
+
+/**
+ *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+{
+	int i;
+
+
+	for (i = 0; i < 128; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *
+ *  return the VLVF index where this VLAN id should be placed
+ *
+ **/
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+{
+	u32 bits = 0;
+	u32 first_empty_slot = 0;
+	s32 regindex;
+
+	/* short cut the special case */
+	if (vlan == 0)
+		return 0;
+
+	/*
+	 * Search for the vlan id in the VLVF entries. Save off the first empty
+	 * slot found along the way
+	 */
+	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+		if (!bits && !(first_empty_slot))
+			first_empty_slot = regindex;
+		else if ((bits & 0x0FFF) == vlan)
+			break;
+	}
+
+	/*
+	 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
+	 * in the VLVF. Else use the first empty VLVF register for this
+	 * vlan id.
+	 */
+	if (regindex >= IXGBE_VLVF_ENTRIES) {
+		if (first_empty_slot)
+			regindex = first_empty_slot;
+		else {
+			hw_dbg(hw, "No space in VLVF.\n");
+			regindex = IXGBE_ERR_NO_SPACE;
+		}
+	}
+
+	return regindex;
+}
+
+/**
+ *  ixgbe_set_vfta_generic - Set VLAN filter table
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+                           bool vlan_on)
+{
+	s32 regindex;
+	u32 bitindex;
+	u32 vfta;
+	u32 bits;
+	u32 vt;
+	u32 targetbit;
+	bool vfta_changed = false;
+
+	if (vlan > 4095)
+		return IXGBE_ERR_PARAM;
+
+	/*
+	 * this is a 2 part operation - first the VFTA, then the
+	 * VLVF and VLVFB if VT Mode is set
+	 * We don't write the VFTA until we know the VLVF part succeeded.
+	 */
+
+	/* Part 1
+	 * The VFTA is a bitstring made up of 128 32-bit registers
+	 * that enable the particular VLAN id, much like the MTA:
+	 *    bits[11-5]: which register
+	 *    bits[4-0]:  which bit in the register
+	 */
+	regindex = (vlan >> 5) & 0x7F;
+	bitindex = vlan & 0x1F;
+	targetbit = (1 << bitindex);
+	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+
+	if (vlan_on) {
+		if (!(vfta & targetbit)) {
+			vfta |= targetbit;
+			vfta_changed = true;
+		}
+	} else {
+		if ((vfta & targetbit)) {
+			vfta &= ~targetbit;
+			vfta_changed = true;
+		}
+	}
+
+	/* Part 2
+	 * If VT Mode is set
+	 *   Either vlan_on
+	 *     make sure the vlan is in VLVF
+	 *     set the vind bit in the matching VLVFB
+	 *   Or !vlan_on
+	 *     clear the pool bit and possibly the vind
+	 */
+	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
+		s32 vlvf_index;
+
+		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
+		if (vlvf_index < 0)
+			return vlvf_index;
+
+		if (vlan_on) {
+			/* set the pool bit */
+			if (vind < 32) {
+				bits = IXGBE_READ_REG(hw,
+						IXGBE_VLVFB(vlvf_index*2));
+				bits |= (1 << vind);
+				IXGBE_WRITE_REG(hw,
+						IXGBE_VLVFB(vlvf_index*2),
+						bits);
+			} else {
+				bits = IXGBE_READ_REG(hw,
+						IXGBE_VLVFB((vlvf_index*2)+1));
+				bits |= (1 << (vind-32));
+				IXGBE_WRITE_REG(hw,
+						IXGBE_VLVFB((vlvf_index*2)+1),
+						bits);
+			}
+		} else {
+			/* clear the pool bit */
+			if (vind < 32) {
+				bits = IXGBE_READ_REG(hw,
+						IXGBE_VLVFB(vlvf_index*2));
+				bits &= ~(1 << vind);
+				IXGBE_WRITE_REG(hw,
+						IXGBE_VLVFB(vlvf_index*2),
+						bits);
+				bits |= IXGBE_READ_REG(hw,
+						IXGBE_VLVFB((vlvf_index*2)+1));
+			} else {
+				bits = IXGBE_READ_REG(hw,
+						IXGBE_VLVFB((vlvf_index*2)+1));
+				bits &= ~(1 << (vind-32));
+				IXGBE_WRITE_REG(hw,
+						IXGBE_VLVFB((vlvf_index*2)+1),
+						bits);
+				bits |= IXGBE_READ_REG(hw,
+						IXGBE_VLVFB(vlvf_index*2));
+			}
+		}
+
+		/*
+		 * If there are still bits set in the VLVFB registers
+		 * for the VLAN ID indicated we need to see if the
+		 * caller is requesting that we clear the VFTA entry bit.
+		 * If the caller has requested that we clear the VFTA
+		 * entry bit but there are still pools/VFs using this VLAN
+		 * ID entry then ignore the request.  We're not worried
+		 * about the case where we're turning the VFTA VLAN ID
+		 * entry bit on, only when requested to turn it off as
+		 * there may be multiple pools and/or VFs using the
+		 * VLAN ID entry.  In that case we cannot clear the
+		 * VFTA bit until all pools/VFs using that VLAN ID have also
+		 * been cleared.  This will be indicated by "bits" being
+		 * zero.
+		 */
+		if (bits) {
+			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+					(IXGBE_VLVF_VIEN | vlan));
+			if (!vlan_on) {
+				/* someone wants to clear the vfta entry
+				 * but some pools/VFs are still using it.
+				 * Ignore it. */
+				vfta_changed = false;
+			}
+		}
+		else
+			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+	}
+
+	if (vfta_changed)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+	return 0;
+}
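For reference, the Part 1 indexing above splits a 12-bit VLAN id across 128 32-bit VFTA registers. A standalone rendering of that arithmetic in plain C, with no driver headers needed:

    #include <stdio.h>

    int main(void)
    {
            /* VLAN id bits [11:5] select one of 128 VFTA registers and
             * bits [4:0] select the bit inside it, the same split that
             * ixgbe_set_vfta_generic() computes above. */
            unsigned int vlan = 100;
            unsigned int regindex = (vlan >> 5) & 0x7F;  /* -> 3 */
            unsigned int bitindex = vlan & 0x1F;         /* -> 4 */

            printf("vlan %u -> VFTA[%u] bit %u (mask 0x%08x)\n",
                   vlan, regindex, bitindex, 1u << bitindex);
            return 0;
    }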
+
+/**
+ *  ixgbe_clear_vfta_generic - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filer table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < hw->mac.vft_size; offset++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
+		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbe_check_mac_link_generic - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: true when link is up
+ *  @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                                 bool *link_up, bool link_up_wait_to_complete)
+{
+	u32 links_reg;
+	u32 i;
+
+	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+	if (link_up_wait_to_complete) {
+		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+			if (links_reg & IXGBE_LINKS_UP) {
+				*link_up = true;
+				break;
+			} else {
+				*link_up = false;
+			}
+			msleep(100);
+			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+		}
+	} else {
+		if (links_reg & IXGBE_LINKS_UP)
+			*link_up = true;
+		else
+			*link_up = false;
+	}
+
+	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+	    IXGBE_LINKS_SPEED_10G_82599)
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+	         IXGBE_LINKS_SPEED_1G_82599)
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+	else
+		*speed = IXGBE_LINK_SPEED_100_FULL;
+
+	/* if link is down, zero out the current_mode */
+	if (*link_up == false) {
+		hw->fc.current_mode = ixgbe_fc_none;
+		hw->fc.fc_was_autonegged = false;
+	}
+
+	return 0;
+}
+
+/**
+ *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ *  the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @wwnn_prefix: the alternative WWNN prefix
+ *  @wwpn_prefix: the alternative WWPN prefix
+ *
+ *  This function will read the EEPROM from the alternative SAN MAC address
+ *  block to check the support for the alternative WWNN/WWPN prefix support.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                                 u16 *wwpn_prefix)
+{
+	u16 offset, caps;
+	u16 alt_san_mac_blk_offset;
+
+	/* clear output first */
+	*wwnn_prefix = 0xFFFF;
+	*wwpn_prefix = 0xFFFF;
+
+	/* check if alternative SAN MAC is supported */
+	hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+	                    &alt_san_mac_blk_offset);
+
+	if ((alt_san_mac_blk_offset == 0) ||
+	    (alt_san_mac_blk_offset == 0xFFFF))
+		goto wwn_prefix_out;
+
+	/* check capability in alternative san mac address block */
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+	hw->eeprom.ops.read(hw, offset, &caps);
+	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+		goto wwn_prefix_out;
+
+	/* get the corresponding prefix for WWNN/WWPN */
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+	hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+	hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+	return 0;
+}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 13606d4809c9..3080afb12bdf 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -30,6 +30,7 @@
 
 #include "ixgbe_type.h"
 
+u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
@@ -45,20 +46,20 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
 
 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                        u16 *data);
 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
                                            u16 *checksum_val);
 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
 
 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
                           u32 enable_addr);
 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
-                                      u32 mc_addr_count,
-                                      ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+                                      struct net_device *netdev);
 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
                                       struct net_device *netdev);
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
@@ -71,9 +72,16 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr);
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
-
-s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+                           u32 vind, bool vlan_on);
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+                                 ixgbe_link_speed *speed,
+                                 bool *link_up, bool link_up_wait_to_complete);
 
 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index dd4883f642be..71da325dfa80 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -488,7 +488,6 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
 	if (adapter->temp_dcb_cfg.pfc_mode_enable !=
 	    adapter->dcb_cfg.pfc_mode_enable)
 		adapter->dcb_set_bitmap |= BIT_PFC;
-	return;
 }
 
 /**
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 8f461d5cee77..c50a7541ffec 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -212,8 +212,8 @@ static int ixgbe_get_settings(struct net_device *netdev,
 		ecmd->port = PORT_FIBRE;
 		break;
 	case ixgbe_phy_nl:
-	case ixgbe_phy_tw_tyco:
-	case ixgbe_phy_tw_unknown:
+	case ixgbe_phy_sfp_passive_tyco:
+	case ixgbe_phy_sfp_passive_unknown:
 	case ixgbe_phy_sfp_ftl:
 	case ixgbe_phy_sfp_avago:
 	case ixgbe_phy_sfp_intel:
@@ -365,7 +365,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
 	else
 		fc.disable_fc_autoneg = false;
 
-	if (pause->rx_pause && pause->tx_pause)
+	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
 		fc.requested_mode = ixgbe_fc_full;
 	else if (pause->rx_pause && !pause->tx_pause)
 		fc.requested_mode = ixgbe_fc_rx_pause;
@@ -1458,8 +1458,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 			struct ixgbe_tx_buffer *buf =
 					&(tx_ring->tx_buffer_info[i]);
 			if (buf->dma)
-				pci_unmap_single(pdev, buf->dma, buf->length,
-				                 PCI_DMA_TODEVICE);
+				dma_unmap_single(&pdev->dev, buf->dma,
+				                 buf->length, DMA_TO_DEVICE);
 			if (buf->skb)
 				dev_kfree_skb(buf->skb);
 		}
@@ -1470,22 +1470,22 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 			struct ixgbe_rx_buffer *buf =
 					&(rx_ring->rx_buffer_info[i]);
 			if (buf->dma)
-				pci_unmap_single(pdev, buf->dma,
+				dma_unmap_single(&pdev->dev, buf->dma,
 				                 IXGBE_RXBUFFER_2048,
-				                 PCI_DMA_FROMDEVICE);
+				                 DMA_FROM_DEVICE);
 			if (buf->skb)
 				dev_kfree_skb(buf->skb);
 		}
 	}
 
 	if (tx_ring->desc) {
-		pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
+		dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
 		                    tx_ring->dma);
 		tx_ring->desc = NULL;
 	}
 	if (rx_ring->desc) {
-		pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
+		dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
 		                    rx_ring->dma);
 		rx_ring->desc = NULL;
 	}
 
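The ethtool hunks in this file are a mechanical move from the PCI-specific DMA wrappers to the generic DMA API. The rough correspondence, shown as a reference comment (the gfp argument is the one genuinely new parameter; the old pci_alloc_consistent() implied an allocation flag rather than taking one):

    /* pci_map_single(pdev, p, len, dir)     -> dma_map_single(&pdev->dev, p, len, dir)
     * pci_unmap_single(pdev, h, len, dir)   -> dma_unmap_single(&pdev->dev, h, len, dir)
     * pci_alloc_consistent(pdev, sz, &h)    -> dma_alloc_coherent(&pdev->dev, sz, &h, GFP_KERNEL)
     * pci_free_consistent(pdev, sz, v, h)   -> dma_free_coherent(&pdev->dev, sz, v, h)
     * pci_dma_sync_single_for_cpu/_device   -> dma_sync_single_for_cpu/_device(&pdev->dev, ...)
     * PCI_DMA_TODEVICE / PCI_DMA_FROMDEVICE -> DMA_TO_DEVICE / DMA_FROM_DEVICE
     */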
@@ -1493,8 +1493,6 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 	tx_ring->tx_buffer_info = NULL;
 	kfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
-
-	return;
 }
 
 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1520,8 +1518,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
-	if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-	                                           &tx_ring->dma))) {
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+	                                   &tx_ring->dma, GFP_KERNEL);
+	if (!(tx_ring->desc)) {
 		ret_val = 2;
 		goto err_nomem;
 	}
@@ -1563,8 +1562,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 		tx_ring->tx_buffer_info[i].skb = skb;
 		tx_ring->tx_buffer_info[i].length = skb->len;
 		tx_ring->tx_buffer_info[i].dma =
-			pci_map_single(pdev, skb->data, skb->len,
-			               PCI_DMA_TODEVICE);
+			dma_map_single(&pdev->dev, skb->data, skb->len,
+			               DMA_TO_DEVICE);
 		desc->read.buffer_addr =
 			cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
 		desc->read.cmd_type_len = cpu_to_le32(skb->len);
@@ -1593,8 +1592,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
-	if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-	                                           &rx_ring->dma))) {
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+	                                   &rx_ring->dma, GFP_KERNEL);
+	if (!(rx_ring->desc)) {
 		ret_val = 5;
 		goto err_nomem;
 	}
@@ -1661,8 +1661,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 		skb_reserve(skb, NET_IP_ALIGN);
 		rx_ring->rx_buffer_info[i].skb = skb;
 		rx_ring->rx_buffer_info[i].dma =
-			pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
-			               PCI_DMA_FROMDEVICE);
+			dma_map_single(&pdev->dev, skb->data,
+			               IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
 		rx_desc->read.pkt_addr =
 			cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
 		memset(skb->data, 0x00, skb->len);
@@ -1775,10 +1775,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
 			ixgbe_create_lbtest_frame(
 					tx_ring->tx_buffer_info[k].skb,
 					1024);
-			pci_dma_sync_single_for_device(pdev,
+			dma_sync_single_for_device(&pdev->dev,
 				tx_ring->tx_buffer_info[k].dma,
 				tx_ring->tx_buffer_info[k].length,
-				PCI_DMA_TODEVICE);
+				DMA_TO_DEVICE);
 			if (unlikely(++k == tx_ring->count))
 				k = 0;
 		}
@@ -1789,10 +1789,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
 		good_cnt = 0;
 		do {
 			/* receive the sent packets */
-			pci_dma_sync_single_for_cpu(pdev,
+			dma_sync_single_for_cpu(&pdev->dev,
 				rx_ring->rx_buffer_info[l].dma,
 				IXGBE_RXBUFFER_2048,
-				PCI_DMA_FROMDEVICE);
+				DMA_FROM_DEVICE);
 			ret_val = ixgbe_check_lbtest_frame(
 					rx_ring->rx_buffer_info[l].skb, 1024);
 			if (!ret_val)
@@ -1971,8 +1971,6 @@ static void ixgbe_get_wol(struct net_device *netdev,
1971 wol->wolopts |= WAKE_BCAST; 1971 wol->wolopts |= WAKE_BCAST;
1972 if (adapter->wol & IXGBE_WUFC_MAG) 1972 if (adapter->wol & IXGBE_WUFC_MAG)
1973 wol->wolopts |= WAKE_MAGIC; 1973 wol->wolopts |= WAKE_MAGIC;
1974
1975 return;
1976} 1974}
1977 1975
1978static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1976static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2079,12 +2077,32 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
2079 return 0; 2077 return 0;
2080} 2078}
2081 2079
2080/*
2081 * this function must be called before setting the new value of
2082 * rx_itr_setting
2083 */
2084static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter,
2085 struct ethtool_coalesce *ec)
2086{
2087 /* check the old value and enable RSC if necessary */
2088 if ((adapter->rx_itr_setting == 0) &&
2089 (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
2090 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2091 adapter->netdev->features |= NETIF_F_LRO;
2092 DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n",
2093 ec->rx_coalesce_usecs);
2094 return true;
2095 }
2096 return false;
2097}
2098
2082static int ixgbe_set_coalesce(struct net_device *netdev, 2099static int ixgbe_set_coalesce(struct net_device *netdev,
2083 struct ethtool_coalesce *ec) 2100 struct ethtool_coalesce *ec)
2084{ 2101{
2085 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2102 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2086 struct ixgbe_q_vector *q_vector; 2103 struct ixgbe_q_vector *q_vector;
2087 int i; 2104 int i;
2105 bool need_reset = false;
2088 2106
2089 /* don't accept tx specific changes if we've got mixed RxTx vectors */ 2107 /* don't accept tx specific changes if we've got mixed RxTx vectors */
2090 if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count 2108 if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
@@ -2095,11 +2113,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2095 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq; 2113 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
2096 2114
2097 if (ec->rx_coalesce_usecs > 1) { 2115 if (ec->rx_coalesce_usecs > 1) {
2116 u32 max_int;
2117 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
2118 max_int = IXGBE_MAX_RSC_INT_RATE;
2119 else
2120 max_int = IXGBE_MAX_INT_RATE;
2121
2098 /* check the limits */ 2122 /* check the limits */
2099 if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) || 2123 if ((1000000/ec->rx_coalesce_usecs > max_int) ||
2100 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) 2124 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
2101 return -EINVAL; 2125 return -EINVAL;
2102 2126
2127 /* check the old value and enable RSC if necessary */
2128 need_reset = ixgbe_reenable_rsc(adapter, ec);
2129
2103 /* store the value in ints/second */ 2130 /* store the value in ints/second */
2104 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; 2131 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
2105 2132
@@ -2108,6 +2135,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2108 /* clear the lower bit as it's used for dynamic state */ 2135 /* clear the lower bit as it's used for dynamic state */
2109 adapter->rx_itr_setting &= ~1; 2136 adapter->rx_itr_setting &= ~1;
2110 } else if (ec->rx_coalesce_usecs == 1) { 2137 } else if (ec->rx_coalesce_usecs == 1) {
2138 /* check the old value and enable RSC if necessary */
2139 need_reset = ixgbe_reenable_rsc(adapter, ec);
2140
2111 /* 1 means dynamic mode */ 2141 /* 1 means dynamic mode */
2112 adapter->rx_eitr_param = 20000; 2142 adapter->rx_eitr_param = 20000;
2113 adapter->rx_itr_setting = 1; 2143 adapter->rx_itr_setting = 1;
@@ -2116,14 +2146,30 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2116 * any other value means disable eitr, which is best 2146 * any other value means disable eitr, which is best
2117 * served by setting the interrupt rate very high 2147 * served by setting the interrupt rate very high
2118 */ 2148 */
2119 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 2149 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
2120 adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
2121 else
2122 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
2123 adapter->rx_itr_setting = 0; 2150 adapter->rx_itr_setting = 0;
2151
2152 /*
2153 * if hardware RSC is enabled, disable it when
 2154 * setting low latency mode, to avoid an erratum, assuming
 2155 * that when the user sets low latency mode they want
2156 * it at the cost of anything else
2157 */
2158 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2159 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2160 netdev->features &= ~NETIF_F_LRO;
2161 DPRINTK(PROBE, INFO,
2162 "rx-usecs set to 0, disabling RSC\n");
2163
2164 need_reset = true;
2165 }
2124 } 2166 }
2125 2167
2126 if (ec->tx_coalesce_usecs > 1) { 2168 if (ec->tx_coalesce_usecs > 1) {
2169 /*
2170 * don't have to worry about max_int as above because
 2171 * tx vectors don't do hardware RSC (an rx-only feature)
2172 */
2127 /* check the limits */ 2173 /* check the limits */
2128 if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) || 2174 if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
2129 (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE)) 2175 (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
@@ -2167,6 +2213,18 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2167 ixgbe_write_eitr(q_vector); 2213 ixgbe_write_eitr(q_vector);
2168 } 2214 }
2169 2215
2216 /*
 2217 * do the reset here at the end to make sure the EITR==0 case is
 2218 * handled correctly w.r.t. stopping tx and changing TXDCTL.WTHRESH;
 2219 * it also locks in the RSC enable/disable, which requires a reset
2220 */
2221 if (need_reset) {
2222 if (netif_running(netdev))
2223 ixgbe_reinit_locked(adapter);
2224 else
2225 ixgbe_reset(adapter);
2226 }
2227
2170 return 0; 2228 return 0;
2171} 2229}
2172 2230
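set_coalesce() treats rx-usecs as three cases: 0 disables throttling, 1 selects dynamic mode, and anything larger is converted to an interrupt rate and range-checked, with a lower ceiling while hardware RSC is enabled. A standalone sketch of the conversion; the numeric limits here are placeholders, not the driver's IXGBE_*_INT_RATE definitions:

#include <stdio.h>

/* placeholder limits; stand-ins for the driver's IXGBE_*_INT_RATE values */
#define MAX_INT_RATE		500000
#define MAX_RSC_INT_RATE	160000
#define MIN_INT_RATE		1000

static int rx_usecs_to_rate(unsigned int usecs, int rsc_enabled)
{
	unsigned int max = rsc_enabled ? MAX_RSC_INT_RATE : MAX_INT_RATE;
	unsigned int rate;

	if (usecs <= 1)
		return -1;	/* 0 = no throttling, 1 = dynamic; special-cased */
	rate = 1000000 / usecs;	/* usecs per interrupt -> interrupts per second */
	if (rate > max || rate < MIN_INT_RATE)
		return -1;	/* the driver returns -EINVAL here */
	return (int)rate;
}

int main(void)
{
	printf("rx-usecs=10      -> %d ints/s\n", rx_usecs_to_rate(10, 0));
	printf("rx-usecs=3 + RSC -> %d (rejected)\n", rx_usecs_to_rate(3, 1));
	return 0;
}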
@@ -2178,10 +2236,26 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2178 ethtool_op_set_flags(netdev, data); 2236 ethtool_op_set_flags(netdev, data);
2179 2237
2180 /* if state changes we need to update adapter->flags and reset */ 2238 /* if state changes we need to update adapter->flags and reset */
2181 if ((!!(data & ETH_FLAG_LRO)) != 2239 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
2182 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) { 2240 /*
2183 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; 2241 * cast both to bool and verify if they are set the same
2184 need_reset = true; 2242 * but only enable RSC if itr is non-zero, as
2243 * itr=0 and RSC are mutually exclusive
2244 */
2245 if (((!!(data & ETH_FLAG_LRO)) !=
2246 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
2247 adapter->rx_itr_setting) {
2248 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
2249 switch (adapter->hw.mac.type) {
2250 case ixgbe_mac_82599EB:
2251 need_reset = true;
2252 break;
2253 default:
2254 break;
2255 }
2256 } else if (!adapter->rx_itr_setting) {
2257 netdev->features &= ~ETH_FLAG_LRO;
2258 }
2185 } 2259 }
2186 2260
2187 /* 2261 /*
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6493049b663d..45182ab41d6b 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -32,6 +32,7 @@
32#endif /* CONFIG_IXGBE_DCB */ 32#endif /* CONFIG_IXGBE_DCB */
33#include <linux/if_ether.h> 33#include <linux/if_ether.h>
34#include <linux/gfp.h> 34#include <linux/gfp.h>
35#include <linux/if_vlan.h>
35#include <scsi/scsi_cmnd.h> 36#include <scsi/scsi_cmnd.h>
36#include <scsi/scsi_device.h> 37#include <scsi/scsi_device.h>
37#include <scsi/fc/fc_fs.h> 38#include <scsi/fc/fc_fs.h>
@@ -312,10 +313,12 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
312 if (fcerr == IXGBE_FCERR_BADCRC) 313 if (fcerr == IXGBE_FCERR_BADCRC)
313 skb->ip_summed = CHECKSUM_NONE; 314 skb->ip_summed = CHECKSUM_NONE;
314 315
315 skb_reset_network_header(skb); 316 if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
316 skb_set_transport_header(skb, skb_network_offset(skb) + 317 fh = (struct fc_frame_header *)(skb->data +
317 sizeof(struct fcoe_hdr)); 318 sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
318 fh = (struct fc_frame_header *)skb_transport_header(skb); 319 else
320 fh = (struct fc_frame_header *)(skb->data +
321 sizeof(struct fcoe_hdr));
319 fctl = ntoh24(fh->fh_f_ctl); 322 fctl = ntoh24(fh->fh_f_ctl);
320 if (fctl & FC_FC_EX_CTX) 323 if (fctl & FC_FC_EX_CTX)
321 xid = be16_to_cpu(fh->fh_ox_id); 324 xid = be16_to_cpu(fh->fh_ox_id);
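Since the transport header is not set at this point in the DDP path, the FC frame header is now located by fixed offsets: an optional VLAN header followed by the FCoE encapsulation header. A quick offset check, assuming the usual sizes (4-byte struct vlan_hdr, 14-byte struct fcoe_hdr):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	const size_t vlan_hdr_len = 4;	/* assumed sizeof(struct vlan_hdr) */
	const size_t fcoe_hdr_len = 14;	/* assumed sizeof(struct fcoe_hdr) */

	/* untagged: FC header right after the FCoE encapsulation header */
	printf("untagged FC header offset: %zu\n", fcoe_hdr_len);
	/* 802.1Q tagged: skip the VLAN header first */
	printf("tagged FC header offset:   %zu\n", vlan_hdr_len + fcoe_hdr_len);
	return 0;
}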
@@ -536,12 +539,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
536 } 539 }
537 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); 540 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
538 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); 541 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
539 fcoe_i = f->mask;
540 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
541 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
542 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
543 IXGBE_ETQS_QUEUE_EN |
544 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
545 } else { 542 } else {
546 /* Use single rx queue for FCoE */ 543 /* Use single rx queue for FCoE */
547 fcoe_i = f->mask; 544 fcoe_i = f->mask;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 6c00ee493a3b..9551cbb7bf01 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -175,6 +175,345 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
175 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; 175 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
176} 176}
177 177
178struct ixgbe_reg_info {
179 u32 ofs;
180 char *name;
181};
182
183static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
184
185 /* General Registers */
186 {IXGBE_CTRL, "CTRL"},
187 {IXGBE_STATUS, "STATUS"},
188 {IXGBE_CTRL_EXT, "CTRL_EXT"},
189
190 /* Interrupt Registers */
191 {IXGBE_EICR, "EICR"},
192
193 /* RX Registers */
194 {IXGBE_SRRCTL(0), "SRRCTL"},
195 {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
196 {IXGBE_RDLEN(0), "RDLEN"},
197 {IXGBE_RDH(0), "RDH"},
198 {IXGBE_RDT(0), "RDT"},
199 {IXGBE_RXDCTL(0), "RXDCTL"},
200 {IXGBE_RDBAL(0), "RDBAL"},
201 {IXGBE_RDBAH(0), "RDBAH"},
202
203 /* TX Registers */
204 {IXGBE_TDBAL(0), "TDBAL"},
205 {IXGBE_TDBAH(0), "TDBAH"},
206 {IXGBE_TDLEN(0), "TDLEN"},
207 {IXGBE_TDH(0), "TDH"},
208 {IXGBE_TDT(0), "TDT"},
209 {IXGBE_TXDCTL(0), "TXDCTL"},
210
211 /* List Terminator */
212 {}
213};
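The empty initializer at the end zeroes every field, so walks over the table can stop when .name is NULL instead of tracking an element count. A standalone illustration of the sentinel pattern (register offsets are hypothetical):

#include <stdio.h>

struct reg_info {
	unsigned int ofs;
	const char *name;
};

static const struct reg_info tbl[] = {
	{0x0000, "CTRL"},	/* hypothetical offsets, for illustration */
	{0x0008, "STATUS"},
	{}			/* sentinel: .name == NULL ends the walk */
};

int main(void)
{
	const struct reg_info *r;

	for (r = tbl; r->name; r++)	/* same walk ixgbe_dump() performs */
		printf("%-8s @ 0x%04X\n", r->name, r->ofs);
	return 0;
}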
214
215
216/*
217 * ixgbe_regdump - register printout routine
218 */
219static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
220{
221 int i = 0, j = 0;
222 char rname[16];
223 u32 regs[64];
224
225 switch (reginfo->ofs) {
226 case IXGBE_SRRCTL(0):
227 for (i = 0; i < 64; i++)
228 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
229 break;
230 case IXGBE_DCA_RXCTRL(0):
231 for (i = 0; i < 64; i++)
232 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
233 break;
234 case IXGBE_RDLEN(0):
235 for (i = 0; i < 64; i++)
236 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
237 break;
238 case IXGBE_RDH(0):
239 for (i = 0; i < 64; i++)
240 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
241 break;
242 case IXGBE_RDT(0):
243 for (i = 0; i < 64; i++)
244 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
245 break;
246 case IXGBE_RXDCTL(0):
247 for (i = 0; i < 64; i++)
248 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
249 break;
250 case IXGBE_RDBAL(0):
251 for (i = 0; i < 64; i++)
252 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
253 break;
254 case IXGBE_RDBAH(0):
255 for (i = 0; i < 64; i++)
256 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
257 break;
258 case IXGBE_TDBAL(0):
259 for (i = 0; i < 64; i++)
260 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
261 break;
262 case IXGBE_TDBAH(0):
263 for (i = 0; i < 64; i++)
264 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
265 break;
266 case IXGBE_TDLEN(0):
267 for (i = 0; i < 64; i++)
268 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
269 break;
270 case IXGBE_TDH(0):
271 for (i = 0; i < 64; i++)
272 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
273 break;
274 case IXGBE_TDT(0):
275 for (i = 0; i < 64; i++)
276 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
277 break;
278 case IXGBE_TXDCTL(0):
279 for (i = 0; i < 64; i++)
280 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
281 break;
282 default:
283 printk(KERN_INFO "%-15s %08x\n", reginfo->name,
284 IXGBE_READ_REG(hw, reginfo->ofs));
285 return;
286 }
287
288 for (i = 0; i < 8; i++) {
289 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
290 printk(KERN_ERR "%-15s ", rname);
291 for (j = 0; j < 8; j++)
292 printk(KERN_CONT "%08x ", regs[i*8+j]);
293 printk(KERN_CONT "\n");
294 }
295
296}
297
298/*
299 * ixgbe_dump - Print registers, tx-rings and rx-rings
300 */
301static void ixgbe_dump(struct ixgbe_adapter *adapter)
302{
303 struct net_device *netdev = adapter->netdev;
304 struct ixgbe_hw *hw = &adapter->hw;
305 struct ixgbe_reg_info *reginfo;
306 int n = 0;
307 struct ixgbe_ring *tx_ring;
308 struct ixgbe_tx_buffer *tx_buffer_info;
309 union ixgbe_adv_tx_desc *tx_desc;
310 struct my_u0 { u64 a; u64 b; } *u0;
311 struct ixgbe_ring *rx_ring;
312 union ixgbe_adv_rx_desc *rx_desc;
313 struct ixgbe_rx_buffer *rx_buffer_info;
314 u32 staterr;
315 int i = 0;
316
317 if (!netif_msg_hw(adapter))
318 return;
319
320 /* Print netdevice Info */
321 if (netdev) {
322 dev_info(&adapter->pdev->dev, "Net device Info\n");
323 printk(KERN_INFO "Device Name state "
324 "trans_start last_rx\n");
325 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
326 netdev->name,
327 netdev->state,
328 netdev->trans_start,
329 netdev->last_rx);
330 }
331
332 /* Print Registers */
333 dev_info(&adapter->pdev->dev, "Register Dump\n");
334 printk(KERN_INFO " Register Name Value\n");
335 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
336 reginfo->name; reginfo++) {
337 ixgbe_regdump(hw, reginfo);
338 }
339
340 /* Print TX Ring Summary */
341 if (!netdev || !netif_running(netdev))
342 goto exit;
343
344 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
345 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] "
346 "leng ntw timestamp\n");
347 for (n = 0; n < adapter->num_tx_queues; n++) {
348 tx_ring = adapter->tx_ring[n];
349 tx_buffer_info =
350 &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
351 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
352 n, tx_ring->next_to_use, tx_ring->next_to_clean,
353 (u64)tx_buffer_info->dma,
354 tx_buffer_info->length,
355 tx_buffer_info->next_to_watch,
356 (u64)tx_buffer_info->time_stamp);
357 }
358
359 /* Print TX Rings */
360 if (!netif_msg_tx_done(adapter))
361 goto rx_ring_summary;
362
363 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
364
365 /* Transmit Descriptor Formats
366 *
367 * Advanced Transmit Descriptor
368 * +--------------------------------------------------------------+
369 * 0 | Buffer Address [63:0] |
370 * +--------------------------------------------------------------+
371 * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
372 * +--------------------------------------------------------------+
373 * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
374 */
375
376 for (n = 0; n < adapter->num_tx_queues; n++) {
377 tx_ring = adapter->tx_ring[n];
378 printk(KERN_INFO "------------------------------------\n");
379 printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
380 printk(KERN_INFO "------------------------------------\n");
381 printk(KERN_INFO "T [desc] [address 63:0 ] "
382 "[PlPOIdStDDt Ln] [bi->dma ] "
383 "leng ntw timestamp bi->skb\n");
384
385 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
386 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
387 tx_buffer_info = &tx_ring->tx_buffer_info[i];
388 u0 = (struct my_u0 *)tx_desc;
389 printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
390 " %04X %3X %016llX %p", i,
391 le64_to_cpu(u0->a),
392 le64_to_cpu(u0->b),
393 (u64)tx_buffer_info->dma,
394 tx_buffer_info->length,
395 tx_buffer_info->next_to_watch,
396 (u64)tx_buffer_info->time_stamp,
397 tx_buffer_info->skb);
398 if (i == tx_ring->next_to_use &&
399 i == tx_ring->next_to_clean)
400 printk(KERN_CONT " NTC/U\n");
401 else if (i == tx_ring->next_to_use)
402 printk(KERN_CONT " NTU\n");
403 else if (i == tx_ring->next_to_clean)
404 printk(KERN_CONT " NTC\n");
405 else
406 printk(KERN_CONT "\n");
407
408 if (netif_msg_pktdata(adapter) &&
409 tx_buffer_info->dma != 0)
410 print_hex_dump(KERN_INFO, "",
411 DUMP_PREFIX_ADDRESS, 16, 1,
412 phys_to_virt(tx_buffer_info->dma),
413 tx_buffer_info->length, true);
414 }
415 }
416
417 /* Print RX Rings Summary */
418rx_ring_summary:
419 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
420 printk(KERN_INFO "Queue [NTU] [NTC]\n");
421 for (n = 0; n < adapter->num_rx_queues; n++) {
422 rx_ring = adapter->rx_ring[n];
423 printk(KERN_INFO "%5d %5X %5X\n", n,
424 rx_ring->next_to_use, rx_ring->next_to_clean);
425 }
426
427 /* Print RX Rings */
428 if (!netif_msg_rx_status(adapter))
429 goto exit;
430
431 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
432
433 /* Advanced Receive Descriptor (Read) Format
434 * 63 1 0
435 * +-----------------------------------------------------+
436 * 0 | Packet Buffer Address [63:1] |A0/NSE|
437 * +----------------------------------------------+------+
438 * 8 | Header Buffer Address [63:1] | DD |
439 * +-----------------------------------------------------+
440 *
441 *
442 * Advanced Receive Descriptor (Write-Back) Format
443 *
444 * 63 48 47 32 31 30 21 20 16 15 4 3 0
445 * +------------------------------------------------------+
446 * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
447 * | Checksum Ident | | | | Type | Type |
448 * +------------------------------------------------------+
449 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
450 * +------------------------------------------------------+
451 * 63 48 47 32 31 20 19 0
452 */
453 for (n = 0; n < adapter->num_rx_queues; n++) {
454 rx_ring = adapter->rx_ring[n];
455 printk(KERN_INFO "------------------------------------\n");
456 printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
457 printk(KERN_INFO "------------------------------------\n");
458 printk(KERN_INFO "R [desc] [ PktBuf A0] "
459 "[ HeadBuf DD] [bi->dma ] [bi->skb] "
460 "<-- Adv Rx Read format\n");
461 printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
462 "[vl er S cks ln] ---------------- [bi->skb] "
463 "<-- Adv Rx Write-Back format\n");
464
465 for (i = 0; i < rx_ring->count; i++) {
466 rx_buffer_info = &rx_ring->rx_buffer_info[i];
467 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
468 u0 = (struct my_u0 *)rx_desc;
469 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
470 if (staterr & IXGBE_RXD_STAT_DD) {
471 /* Descriptor Done */
472 printk(KERN_INFO "RWB[0x%03X] %016llX "
473 "%016llX ---------------- %p", i,
474 le64_to_cpu(u0->a),
475 le64_to_cpu(u0->b),
476 rx_buffer_info->skb);
477 } else {
478 printk(KERN_INFO "R [0x%03X] %016llX "
479 "%016llX %016llX %p", i,
480 le64_to_cpu(u0->a),
481 le64_to_cpu(u0->b),
482 (u64)rx_buffer_info->dma,
483 rx_buffer_info->skb);
484
485 if (netif_msg_pktdata(adapter)) {
486 print_hex_dump(KERN_INFO, "",
487 DUMP_PREFIX_ADDRESS, 16, 1,
488 phys_to_virt(rx_buffer_info->dma),
489 rx_ring->rx_buf_len, true);
490
491 if (rx_ring->rx_buf_len
492 < IXGBE_RXBUFFER_2048)
493 print_hex_dump(KERN_INFO, "",
494 DUMP_PREFIX_ADDRESS, 16, 1,
495 phys_to_virt(
496 rx_buffer_info->page_dma +
497 rx_buffer_info->page_offset
498 ),
499 PAGE_SIZE/2, true);
500 }
501 }
502
503 if (i == rx_ring->next_to_use)
504 printk(KERN_CONT " NTU\n");
505 else if (i == rx_ring->next_to_clean)
506 printk(KERN_CONT " NTC\n");
507 else
508 printk(KERN_CONT "\n");
509
510 }
511 }
512
513exit:
514 return;
515}
516
178static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) 517static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
179{ 518{
180 u32 ctrl_ext; 519 u32 ctrl_ext;
@@ -266,15 +605,15 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
266{ 605{
267 if (tx_buffer_info->dma) { 606 if (tx_buffer_info->dma) {
268 if (tx_buffer_info->mapped_as_page) 607 if (tx_buffer_info->mapped_as_page)
269 pci_unmap_page(adapter->pdev, 608 dma_unmap_page(&adapter->pdev->dev,
270 tx_buffer_info->dma, 609 tx_buffer_info->dma,
271 tx_buffer_info->length, 610 tx_buffer_info->length,
272 PCI_DMA_TODEVICE); 611 DMA_TO_DEVICE);
273 else 612 else
274 pci_unmap_single(adapter->pdev, 613 dma_unmap_single(&adapter->pdev->dev,
275 tx_buffer_info->dma, 614 tx_buffer_info->dma,
276 tx_buffer_info->length, 615 tx_buffer_info->length,
277 PCI_DMA_TODEVICE); 616 DMA_TO_DEVICE);
278 tx_buffer_info->dma = 0; 617 tx_buffer_info->dma = 0;
279 } 618 }
280 if (tx_buffer_info->skb) { 619 if (tx_buffer_info->skb) {
@@ -286,16 +625,16 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
286} 625}
287 626
288/** 627/**
289 * ixgbe_tx_is_paused - check if the tx ring is paused 628 * ixgbe_tx_xon_state - check the tx ring xon state
290 * @adapter: the ixgbe adapter 629 * @adapter: the ixgbe adapter
291 * @tx_ring: the corresponding tx_ring 630 * @tx_ring: the corresponding tx_ring
292 * 631 *
293 * If not in DCB mode, checks TFCS.TXOFF; otherwise, finds out the 632 * If not in DCB mode, checks TFCS.TXOFF; otherwise, finds out the
294 * corresponding TC of this tx_ring when checking TFCS. 633 * corresponding TC of this tx_ring when checking TFCS.
295 * 634 *
296 * Returns: true if paused 635 * Returns: true if in xon state (currently not paused)
297 */ 636 */
298static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter, 637static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
299 struct ixgbe_ring *tx_ring) 638 struct ixgbe_ring *tx_ring)
300{ 639{
301 u32 txoff = IXGBE_TFCS_TXOFF; 640 u32 txoff = IXGBE_TFCS_TXOFF;
@@ -351,7 +690,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
351 adapter->detect_tx_hung = false; 690 adapter->detect_tx_hung = false;
352 if (tx_ring->tx_buffer_info[eop].time_stamp && 691 if (tx_ring->tx_buffer_info[eop].time_stamp &&
353 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && 692 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
354 !ixgbe_tx_is_paused(adapter, tx_ring)) { 693 ixgbe_tx_xon_state(adapter, tx_ring)) {
355 /* detected Tx unit hang */ 694 /* detected Tx unit hang */
356 union ixgbe_adv_tx_desc *tx_desc; 695 union ixgbe_adv_tx_desc *tx_desc;
357 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 696 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -721,10 +1060,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
721 bi->page_offset ^= (PAGE_SIZE / 2); 1060 bi->page_offset ^= (PAGE_SIZE / 2);
722 } 1061 }
723 1062
724 bi->page_dma = pci_map_page(pdev, bi->page, 1063 bi->page_dma = dma_map_page(&pdev->dev, bi->page,
725 bi->page_offset, 1064 bi->page_offset,
726 (PAGE_SIZE / 2), 1065 (PAGE_SIZE / 2),
727 PCI_DMA_FROMDEVICE); 1066 DMA_FROM_DEVICE);
728 } 1067 }
729 1068
730 if (!bi->skb) { 1069 if (!bi->skb) {
@@ -743,9 +1082,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
743 - skb->data)); 1082 - skb->data));
744 1083
745 bi->skb = skb; 1084 bi->skb = skb;
746 bi->dma = pci_map_single(pdev, skb->data, 1085 bi->dma = dma_map_single(&pdev->dev, skb->data,
747 rx_ring->rx_buf_len, 1086 rx_ring->rx_buf_len,
748 PCI_DMA_FROMDEVICE); 1087 DMA_FROM_DEVICE);
749 } 1088 }
750 /* Refresh the desc even if buffer_addrs didn't change because 1089 /* Refresh the desc even if buffer_addrs didn't change because
751 * each write-back erases this info. */ 1090 * each write-back erases this info. */
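Each receive page is used as two half-page buffers: page_offset toggles between 0 and PAGE_SIZE/2 with an XOR, and only the half currently being refilled is mapped. A minimal sketch of the toggle; the struct is hypothetical and mirrors just the page fields the driver uses:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct rx_half_page {		/* hypothetical, for illustration */
	struct page *page;
	unsigned int page_offset;
	dma_addr_t page_dma;
};

static void map_other_half(struct device *dev, struct rx_half_page *bi)
{
	/* flip to the other half of the page, then map just that half */
	bi->page_offset ^= PAGE_SIZE / 2;
	bi->page_dma = dma_map_page(dev, bi->page, bi->page_offset,
				    PAGE_SIZE / 2, DMA_FROM_DEVICE);
}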
@@ -821,6 +1160,7 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
821 1160
822struct ixgbe_rsc_cb { 1161struct ixgbe_rsc_cb {
823 dma_addr_t dma; 1162 dma_addr_t dma;
1163 bool delay_unmap;
824}; 1164};
825 1165
826#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) 1166#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
@@ -861,9 +1201,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
861 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); 1201 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
862 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> 1202 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
863 IXGBE_RXDADV_HDRBUFLEN_SHIFT; 1203 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
864 if (len > IXGBE_RX_HDR_SIZE)
865 len = IXGBE_RX_HDR_SIZE;
866 upper_len = le16_to_cpu(rx_desc->wb.upper.length); 1204 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1205 if ((len > IXGBE_RX_HDR_SIZE) ||
1206 (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
1207 len = IXGBE_RX_HDR_SIZE;
867 } else { 1208 } else {
868 len = le16_to_cpu(rx_desc->wb.upper.length); 1209 len = le16_to_cpu(rx_desc->wb.upper.length);
869 } 1210 }
@@ -876,7 +1217,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
876 if (rx_buffer_info->dma) { 1217 if (rx_buffer_info->dma) {
877 if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 1218 if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
878 (!(staterr & IXGBE_RXD_STAT_EOP)) && 1219 (!(staterr & IXGBE_RXD_STAT_EOP)) &&
879 (!(skb->prev))) 1220 (!(skb->prev))) {
880 /* 1221 /*
881 * When HWRSC is enabled, delay unmapping 1222 * When HWRSC is enabled, delay unmapping
882 * of the first packet. It carries the 1223 * of the first packet. It carries the
@@ -884,18 +1225,21 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
884 * access the header after the writeback. 1225 * access the header after the writeback.
885 * Only unmap it when EOP is reached 1226 * Only unmap it when EOP is reached
886 */ 1227 */
1228 IXGBE_RSC_CB(skb)->delay_unmap = true;
887 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; 1229 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
888 else 1230 } else {
889 pci_unmap_single(pdev, rx_buffer_info->dma, 1231 dma_unmap_single(&pdev->dev,
1232 rx_buffer_info->dma,
890 rx_ring->rx_buf_len, 1233 rx_ring->rx_buf_len,
891 PCI_DMA_FROMDEVICE); 1234 DMA_FROM_DEVICE);
1235 }
892 rx_buffer_info->dma = 0; 1236 rx_buffer_info->dma = 0;
893 skb_put(skb, len); 1237 skb_put(skb, len);
894 } 1238 }
895 1239
896 if (upper_len) { 1240 if (upper_len) {
897 pci_unmap_page(pdev, rx_buffer_info->page_dma, 1241 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
898 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); 1242 PAGE_SIZE / 2, DMA_FROM_DEVICE);
899 rx_buffer_info->page_dma = 0; 1243 rx_buffer_info->page_dma = 0;
900 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 1244 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
901 rx_buffer_info->page, 1245 rx_buffer_info->page,
@@ -936,11 +1280,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
936 if (skb->prev) 1280 if (skb->prev)
937 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); 1281 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
938 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 1282 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
939 if (IXGBE_RSC_CB(skb)->dma) { 1283 if (IXGBE_RSC_CB(skb)->delay_unmap) {
940 pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma, 1284 dma_unmap_single(&pdev->dev,
1285 IXGBE_RSC_CB(skb)->dma,
941 rx_ring->rx_buf_len, 1286 rx_ring->rx_buf_len,
942 PCI_DMA_FROMDEVICE); 1287 DMA_FROM_DEVICE);
943 IXGBE_RSC_CB(skb)->dma = 0; 1288 IXGBE_RSC_CB(skb)->dma = 0;
1289 IXGBE_RSC_CB(skb)->delay_unmap = false;
944 } 1290 }
945 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) 1291 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
946 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags; 1292 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
@@ -1190,6 +1536,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1190 itr_reg |= (itr_reg << 16); 1536 itr_reg |= (itr_reg << 16);
1191 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1537 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1192 /* 1538 /*
1539 * 82599 can support a value of zero, so allow it for
 1540 * max interrupt rate, but there is an erratum where it
 1541 * cannot be zero with RSC
1542 */
1543 if (itr_reg == 8 &&
1544 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
1545 itr_reg = 0;
1546
1547 /*
1193 * set the WDIS bit to not clear the timer bits and cause an 1548 * set the WDIS bit to not clear the timer bits and cause an
1194 * immediate assertion of the interrupt 1549 * immediate assertion of the interrupt
1195 */ 1550 */
@@ -1261,8 +1616,6 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1261 1616
1262 ixgbe_write_eitr(q_vector); 1617 ixgbe_write_eitr(q_vector);
1263 } 1618 }
1264
1265 return;
1266} 1619}
1267 1620
1268static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) 1621static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
@@ -1826,8 +2179,6 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1826 2179
1827 ixgbe_write_eitr(q_vector); 2180 ixgbe_write_eitr(q_vector);
1828 } 2181 }
1829
1830 return;
1831} 2182}
1832 2183
1833/** 2184/**
@@ -2372,7 +2723,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2372 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); 2723 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2373 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); 2724 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2374 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); 2725 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2375 ixgbe_set_vmolr(hw, adapter->num_vfs); 2726 ixgbe_set_vmolr(hw, adapter->num_vfs, true);
2376 } 2727 }
2377 2728
2378 /* Program MRQC for the distribution of queues */ 2729 /* Program MRQC for the distribution of queues */
@@ -2482,12 +2833,82 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2482 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); 2833 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
2483} 2834}
2484 2835
2836/**
2837 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
2838 * @adapter: driver data
2839 */
2840static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
2841{
2842 struct ixgbe_hw *hw = &adapter->hw;
2843 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2844 int i, j;
2845
2846 switch (hw->mac.type) {
2847 case ixgbe_mac_82598EB:
2848 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
2849#ifdef CONFIG_IXGBE_DCB
2850 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
2851 vlnctrl &= ~IXGBE_VLNCTRL_VME;
2852#endif
2853 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2854 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2855 break;
2856 case ixgbe_mac_82599EB:
2857 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
2858 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2859 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2860#ifdef CONFIG_IXGBE_DCB
2861 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
2862 break;
2863#endif
2864 for (i = 0; i < adapter->num_rx_queues; i++) {
2865 j = adapter->rx_ring[i]->reg_idx;
2866 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2867 vlnctrl &= ~IXGBE_RXDCTL_VME;
2868 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2869 }
2870 break;
2871 default:
2872 break;
2873 }
2874}
2875
2876/**
2877 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
2878 * @adapter: driver data
2879 */
2880static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
2881{
2882 struct ixgbe_hw *hw = &adapter->hw;
2883 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2884 int i, j;
2885
2886 switch (hw->mac.type) {
2887 case ixgbe_mac_82598EB:
2888 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2889 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2890 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2891 break;
2892 case ixgbe_mac_82599EB:
2893 vlnctrl |= IXGBE_VLNCTRL_VFE;
2894 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2895 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2896 for (i = 0; i < adapter->num_rx_queues; i++) {
2897 j = adapter->rx_ring[i]->reg_idx;
2898 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2899 vlnctrl |= IXGBE_RXDCTL_VME;
2900 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2901 }
2902 break;
2903 default:
2904 break;
2905 }
2906}
2907
2485static void ixgbe_vlan_rx_register(struct net_device *netdev, 2908static void ixgbe_vlan_rx_register(struct net_device *netdev,
2486 struct vlan_group *grp) 2909 struct vlan_group *grp)
2487{ 2910{
2488 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2911 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2489 u32 ctrl;
2490 int i, j;
2491 2912
2492 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2913 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2493 ixgbe_irq_disable(adapter); 2914 ixgbe_irq_disable(adapter);
@@ -2498,25 +2919,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
2498 * still receive traffic from a DCB-enabled host even if we're 2919 * still receive traffic from a DCB-enabled host even if we're
2499 * not in DCB mode. 2920 * not in DCB mode.
2500 */ 2921 */
2501 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 2922 ixgbe_vlan_filter_enable(adapter);
2502
2503 /* Disable CFI check */
2504 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2505
2506 /* enable VLAN tag stripping */
2507 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2508 ctrl |= IXGBE_VLNCTRL_VME;
2509 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
2510 for (i = 0; i < adapter->num_rx_queues; i++) {
2511 u32 ctrl;
2512 j = adapter->rx_ring[i]->reg_idx;
2513 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
2514 ctrl |= IXGBE_RXDCTL_VME;
2515 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
2516 }
2517 }
2518
2519 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
2520 2923
2521 ixgbe_vlan_rx_add_vid(netdev, 0); 2924 ixgbe_vlan_rx_add_vid(netdev, 0);
2522 2925
@@ -2538,21 +2941,6 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
2538 } 2941 }
2539} 2942}
2540 2943
2541static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
2542{
2543 struct dev_mc_list *mc_ptr;
2544 u8 *addr = *mc_addr_ptr;
2545 *vmdq = 0;
2546
2547 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
2548 if (mc_ptr->next)
2549 *mc_addr_ptr = mc_ptr->next->dmi_addr;
2550 else
2551 *mc_addr_ptr = NULL;
2552
2553 return addr;
2554}
2555
2556/** 2944/**
2557 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set 2945 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
2558 * @netdev: network interface device structure 2946 * @netdev: network interface device structure
@@ -2566,42 +2954,36 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
2566{ 2954{
2567 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2955 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2568 struct ixgbe_hw *hw = &adapter->hw; 2956 struct ixgbe_hw *hw = &adapter->hw;
2569 u32 fctrl, vlnctrl; 2957 u32 fctrl;
2570 u8 *addr_list = NULL;
2571 int addr_count = 0;
2572 2958
2573 /* Check for Promiscuous and All Multicast modes */ 2959 /* Check for Promiscuous and All Multicast modes */
2574 2960
2575 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2961 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2576 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2577 2962
2578 if (netdev->flags & IFF_PROMISC) { 2963 if (netdev->flags & IFF_PROMISC) {
2579 hw->addr_ctrl.user_set_promisc = 1; 2964 hw->addr_ctrl.user_set_promisc = true;
2580 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2965 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2581 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 2966 /* don't hardware filter vlans in promisc mode */
2967 ixgbe_vlan_filter_disable(adapter);
2582 } else { 2968 } else {
2583 if (netdev->flags & IFF_ALLMULTI) { 2969 if (netdev->flags & IFF_ALLMULTI) {
2584 fctrl |= IXGBE_FCTRL_MPE; 2970 fctrl |= IXGBE_FCTRL_MPE;
2585 fctrl &= ~IXGBE_FCTRL_UPE; 2971 fctrl &= ~IXGBE_FCTRL_UPE;
2586 } else { 2972 } else if (!hw->addr_ctrl.uc_set_promisc) {
2587 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2973 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2588 } 2974 }
2589 vlnctrl |= IXGBE_VLNCTRL_VFE; 2975 ixgbe_vlan_filter_enable(adapter);
2590 hw->addr_ctrl.user_set_promisc = 0; 2976 hw->addr_ctrl.user_set_promisc = false;
2591 } 2977 }
2592 2978
2593 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 2979 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2594 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2595 2980
2596 /* reprogram secondary unicast list */ 2981 /* reprogram secondary unicast list */
2597 hw->mac.ops.update_uc_addr_list(hw, netdev); 2982 hw->mac.ops.update_uc_addr_list(hw, netdev);
2598 2983
2599 /* reprogram multicast list */ 2984 /* reprogram multicast list */
2600 addr_count = netdev_mc_count(netdev); 2985 hw->mac.ops.update_mc_addr_list(hw, netdev);
2601 if (addr_count) 2986
2602 addr_list = netdev->mc_list->dmi_addr;
2603 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
2604 ixgbe_addr_list_itr);
2605 if (adapter->num_vfs) 2987 if (adapter->num_vfs)
2606 ixgbe_restore_vf_multicasts(adapter); 2988 ixgbe_restore_vf_multicasts(adapter);
2607} 2989}
@@ -2661,7 +3043,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
2661static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) 3043static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2662{ 3044{
2663 struct ixgbe_hw *hw = &adapter->hw; 3045 struct ixgbe_hw *hw = &adapter->hw;
2664 u32 txdctl, vlnctrl; 3046 u32 txdctl;
2665 int i, j; 3047 int i, j;
2666 3048
2667 ixgbe_dcb_check_config(&adapter->dcb_cfg); 3049 ixgbe_dcb_check_config(&adapter->dcb_cfg);
@@ -2679,22 +3061,8 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2679 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 3061 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2680 } 3062 }
2681 /* Enable VLAN tag insert/strip */ 3063 /* Enable VLAN tag insert/strip */
2682 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 3064 ixgbe_vlan_filter_enable(adapter);
2683 if (hw->mac.type == ixgbe_mac_82598EB) { 3065
2684 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2685 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2686 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2687 } else if (hw->mac.type == ixgbe_mac_82599EB) {
2688 vlnctrl |= IXGBE_VLNCTRL_VFE;
2689 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2690 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2691 for (i = 0; i < adapter->num_rx_queues; i++) {
2692 j = adapter->rx_ring[i]->reg_idx;
2693 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2694 vlnctrl |= IXGBE_RXDCTL_VME;
2695 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2696 }
2697 }
2698 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3066 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
2699} 3067}
2700 3068
@@ -2750,8 +3118,10 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2750 case ixgbe_phy_sfp_ftl: 3118 case ixgbe_phy_sfp_ftl:
2751 case ixgbe_phy_sfp_intel: 3119 case ixgbe_phy_sfp_intel:
2752 case ixgbe_phy_sfp_unknown: 3120 case ixgbe_phy_sfp_unknown:
2753 case ixgbe_phy_tw_tyco: 3121 case ixgbe_phy_sfp_passive_tyco:
2754 case ixgbe_phy_tw_unknown: 3122 case ixgbe_phy_sfp_passive_unknown:
3123 case ixgbe_phy_sfp_active_unknown:
3124 case ixgbe_phy_sfp_ftl_active:
2755 return true; 3125 return true;
2756 default: 3126 default:
2757 return false; 3127 return false;
@@ -2927,8 +3297,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2927 for (i = 0; i < adapter->num_tx_queues; i++) { 3297 for (i = 0; i < adapter->num_tx_queues; i++) {
2928 j = adapter->tx_ring[i]->reg_idx; 3298 j = adapter->tx_ring[i]->reg_idx;
2929 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 3299 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2930 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 3300 if (adapter->rx_itr_setting == 0) {
2931 txdctl |= (8 << 16); 3301 /* cannot set wthresh when itr==0 */
3302 txdctl &= ~0x007F0000;
3303 } else {
3304 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
3305 txdctl |= (8 << 16);
3306 }
2932 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 3307 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2933 } 3308 }
2934 3309
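The new branch relies on WTHRESH occupying bits 22:16 of TXDCTL: 0x007F0000 is exactly that 7-bit field, and (8 << 16) writes WTHRESH=8 into it. A standalone check of the mask arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t txdctl = 0x02012345;		/* arbitrary example value */
	const uint32_t wthresh = 0x007F0000;	/* TXDCTL bits 22:16 */

	txdctl &= ~wthresh;			/* itr == 0: WTHRESH must be 0 */
	printf("cleared field: %u\n", (txdctl & wthresh) >> 16);	/* 0 */
	txdctl |= 8u << 16;			/* otherwise: WTHRESH = 8 */
	printf("set field:     %u\n", (txdctl & wthresh) >> 16);	/* 8 */
	return 0;
}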
@@ -3131,9 +3506,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3131 3506
3132 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 3507 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3133 if (rx_buffer_info->dma) { 3508 if (rx_buffer_info->dma) {
3134 pci_unmap_single(pdev, rx_buffer_info->dma, 3509 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
3135 rx_ring->rx_buf_len, 3510 rx_ring->rx_buf_len,
3136 PCI_DMA_FROMDEVICE); 3511 DMA_FROM_DEVICE);
3137 rx_buffer_info->dma = 0; 3512 rx_buffer_info->dma = 0;
3138 } 3513 }
3139 if (rx_buffer_info->skb) { 3514 if (rx_buffer_info->skb) {
@@ -3141,11 +3516,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3141 rx_buffer_info->skb = NULL; 3516 rx_buffer_info->skb = NULL;
3142 do { 3517 do {
3143 struct sk_buff *this = skb; 3518 struct sk_buff *this = skb;
3144 if (IXGBE_RSC_CB(this)->dma) { 3519 if (IXGBE_RSC_CB(this)->delay_unmap) {
3145 pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma, 3520 dma_unmap_single(&pdev->dev,
3521 IXGBE_RSC_CB(this)->dma,
3146 rx_ring->rx_buf_len, 3522 rx_ring->rx_buf_len,
3147 PCI_DMA_FROMDEVICE); 3523 DMA_FROM_DEVICE);
3148 IXGBE_RSC_CB(this)->dma = 0; 3524 IXGBE_RSC_CB(this)->dma = 0;
3525 IXGBE_RSC_CB(skb)->delay_unmap = false;
3149 } 3526 }
3150 skb = skb->prev; 3527 skb = skb->prev;
3151 dev_kfree_skb(this); 3528 dev_kfree_skb(this);
@@ -3154,8 +3531,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3154 if (!rx_buffer_info->page) 3531 if (!rx_buffer_info->page)
3155 continue; 3532 continue;
3156 if (rx_buffer_info->page_dma) { 3533 if (rx_buffer_info->page_dma) {
3157 pci_unmap_page(pdev, rx_buffer_info->page_dma, 3534 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
3158 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); 3535 PAGE_SIZE / 2, DMA_FROM_DEVICE);
3159 rx_buffer_info->page_dma = 0; 3536 rx_buffer_info->page_dma = 0;
3160 } 3537 }
3161 put_page(rx_buffer_info->page); 3538 put_page(rx_buffer_info->page);
@@ -3268,22 +3645,23 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3268 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3645 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3269 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 3646 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3270 3647
3271 netif_tx_disable(netdev);
3272
3273 IXGBE_WRITE_FLUSH(hw); 3648 IXGBE_WRITE_FLUSH(hw);
3274 msleep(10); 3649 msleep(10);
3275 3650
3276 netif_tx_stop_all_queues(netdev); 3651 netif_tx_stop_all_queues(netdev);
3277 3652
3278 ixgbe_irq_disable(adapter);
3279
3280 ixgbe_napi_disable_all(adapter);
3281
3282 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 3653 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3283 del_timer_sync(&adapter->sfp_timer); 3654 del_timer_sync(&adapter->sfp_timer);
3284 del_timer_sync(&adapter->watchdog_timer); 3655 del_timer_sync(&adapter->watchdog_timer);
3285 cancel_work_sync(&adapter->watchdog_task); 3656 cancel_work_sync(&adapter->watchdog_task);
3286 3657
3658 netif_carrier_off(netdev);
3659 netif_tx_disable(netdev);
3660
3661 ixgbe_irq_disable(adapter);
3662
3663 ixgbe_napi_disable_all(adapter);
3664
3287 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 3665 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3288 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 3666 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3289 cancel_work_sync(&adapter->fdir_reinit_task); 3667 cancel_work_sync(&adapter->fdir_reinit_task);
@@ -3301,8 +3679,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3301 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 3679 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3302 ~IXGBE_DMATXCTL_TE)); 3680 ~IXGBE_DMATXCTL_TE));
3303 3681
3304 netif_carrier_off(netdev);
3305
3306 /* clear n-tuple filters that are cached */ 3682 /* clear n-tuple filters that are cached */
3307 ethtool_ntuple_flush(netdev); 3683 ethtool_ntuple_flush(netdev);
3308 3684
@@ -3379,6 +3755,8 @@ static void ixgbe_reset_task(struct work_struct *work)
3379 3755
3380 adapter->tx_timeout_count++; 3756 adapter->tx_timeout_count++;
3381 3757
3758 ixgbe_dump(adapter);
3759 netdev_err(adapter->netdev, "Reset adapter\n");
3382 ixgbe_reinit_locked(adapter); 3760 ixgbe_reinit_locked(adapter);
3383} 3761}
3384 3762
@@ -3479,12 +3857,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3479 adapter->num_tx_queues = 1; 3857 adapter->num_tx_queues = 1;
3480#ifdef CONFIG_IXGBE_DCB 3858#ifdef CONFIG_IXGBE_DCB
3481 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 3859 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3482 DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n"); 3860 DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
3483 ixgbe_set_dcb_queues(adapter); 3861 ixgbe_set_dcb_queues(adapter);
3484 } 3862 }
3485#endif 3863#endif
3486 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 3864 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3487 DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n"); 3865 DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
3488 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 3866 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3489 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 3867 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3490 ixgbe_set_fdir_queues(adapter); 3868 ixgbe_set_fdir_queues(adapter);
@@ -4095,7 +4473,6 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
4095 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; 4473 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
4096 pci_disable_msi(adapter->pdev); 4474 pci_disable_msi(adapter->pdev);
4097 } 4475 }
4098 return;
4099} 4476}
4100 4477
4101/** 4478/**
@@ -4381,8 +4758,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4381 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 4758 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4382 tx_ring->size = ALIGN(tx_ring->size, 4096); 4759 tx_ring->size = ALIGN(tx_ring->size, 4096);
4383 4760
4384 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 4761 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
4385 &tx_ring->dma); 4762 &tx_ring->dma, GFP_KERNEL);
4386 if (!tx_ring->desc) 4763 if (!tx_ring->desc)
4387 goto err; 4764 goto err;
4388 4765
@@ -4452,7 +4829,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
4452 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 4829 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
4453 rx_ring->size = ALIGN(rx_ring->size, 4096); 4830 rx_ring->size = ALIGN(rx_ring->size, 4096);
4454 4831
4455 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma); 4832 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
4833 &rx_ring->dma, GFP_KERNEL);
4456 4834
4457 if (!rx_ring->desc) { 4835 if (!rx_ring->desc) {
4458 DPRINTK(PROBE, ERR, 4836 DPRINTK(PROBE, ERR,
@@ -4513,7 +4891,8 @@ void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
4513 vfree(tx_ring->tx_buffer_info); 4891 vfree(tx_ring->tx_buffer_info);
4514 tx_ring->tx_buffer_info = NULL; 4892 tx_ring->tx_buffer_info = NULL;
4515 4893
4516 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 4894 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
4895 tx_ring->dma);
4517 4896
4518 tx_ring->desc = NULL; 4897 tx_ring->desc = NULL;
4519} 4898}
@@ -4550,7 +4929,8 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
4550 vfree(rx_ring->rx_buffer_info); 4929 vfree(rx_ring->rx_buffer_info);
4551 rx_ring->rx_buffer_info = NULL; 4930 rx_ring->rx_buffer_info = NULL;
4552 4931
4553 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 4932 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
4933 rx_ring->dma);
4554 4934
4555 rx_ring->desc = NULL; 4935 rx_ring->desc = NULL;
4556} 4936}
@@ -5100,7 +5480,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
5100 &(adapter->tx_ring[i]->reinit_state)); 5480 &(adapter->tx_ring[i]->reinit_state));
5101 } else { 5481 } else {
5102 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " 5482 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
5103 "ignored adding FDIR ATR filters \n"); 5483 "ignored adding FDIR ATR filters\n");
5104 } 5484 }
5105 /* Done FDIR Re-initialization, enable transmits */ 5485 /* Done FDIR Re-initialization, enable transmits */
5106 netif_tx_start_all_queues(adapter->netdev); 5486 netif_tx_start_all_queues(adapter->netdev);
@@ -5420,10 +5800,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5420 5800
5421 tx_buffer_info->length = size; 5801 tx_buffer_info->length = size;
5422 tx_buffer_info->mapped_as_page = false; 5802 tx_buffer_info->mapped_as_page = false;
5423 tx_buffer_info->dma = pci_map_single(pdev, 5803 tx_buffer_info->dma = dma_map_single(&pdev->dev,
5424 skb->data + offset, 5804 skb->data + offset,
5425 size, PCI_DMA_TODEVICE); 5805 size, DMA_TO_DEVICE);
5426 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 5806 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
5427 goto dma_error; 5807 goto dma_error;
5428 tx_buffer_info->time_stamp = jiffies; 5808 tx_buffer_info->time_stamp = jiffies;
5429 tx_buffer_info->next_to_watch = i; 5809 tx_buffer_info->next_to_watch = i;
@@ -5456,12 +5836,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5456 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 5836 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
5457 5837
5458 tx_buffer_info->length = size; 5838 tx_buffer_info->length = size;
5459 tx_buffer_info->dma = pci_map_page(adapter->pdev, 5839 tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
5460 frag->page, 5840 frag->page,
5461 offset, size, 5841 offset, size,
5462 PCI_DMA_TODEVICE); 5842 DMA_TO_DEVICE);
5463 tx_buffer_info->mapped_as_page = true; 5843 tx_buffer_info->mapped_as_page = true;
5464 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 5844 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
5465 goto dma_error; 5845 goto dma_error;
5466 tx_buffer_info->time_stamp = jiffies; 5846 tx_buffer_info->time_stamp = jiffies;
5467 tx_buffer_info->next_to_watch = i; 5847 tx_buffer_info->next_to_watch = i;
@@ -5697,7 +6077,8 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
5697 } 6077 }
5698 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 6078 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5699 tx_flags |= IXGBE_TX_FLAGS_VLAN; 6079 tx_flags |= IXGBE_TX_FLAGS_VLAN;
5700 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6080 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
6081 skb->priority != TC_PRIO_CONTROL) {
5701 tx_flags |= ((skb->queue_mapping & 0x7) << 13); 6082 tx_flags |= ((skb->queue_mapping & 0x7) << 13);
5702 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 6083 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5703 tx_flags |= IXGBE_TX_FLAGS_VLAN; 6084 tx_flags |= IXGBE_TX_FLAGS_VLAN;
@@ -5942,6 +6323,10 @@ static const struct net_device_ops ixgbe_netdev_ops = {
5942 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, 6323 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
5943 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, 6324 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
5944 .ndo_do_ioctl = ixgbe_ioctl, 6325 .ndo_do_ioctl = ixgbe_ioctl,
6326 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
6327 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
6328 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
6329 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
5945#ifdef CONFIG_NET_POLL_CONTROLLER 6330#ifdef CONFIG_NET_POLL_CONTROLLER
5946 .ndo_poll_controller = ixgbe_netpoll, 6331 .ndo_poll_controller = ixgbe_netpoll,
5947#endif 6332#endif
@@ -6039,13 +6424,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6039 if (err) 6424 if (err)
6040 return err; 6425 return err;
6041 6426
6042 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 6427 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
6043 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 6428 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
6044 pci_using_dac = 1; 6429 pci_using_dac = 1;
6045 } else { 6430 } else {
6046 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 6431 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6047 if (err) { 6432 if (err) {
6048 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 6433 err = dma_set_coherent_mask(&pdev->dev,
6434 DMA_BIT_MASK(32));
6049 if (err) { 6435 if (err) {
6050 dev_err(&pdev->dev, "No usable DMA " 6436 dev_err(&pdev->dev, "No usable DMA "
6051 "configuration, aborting\n"); 6437 "configuration, aborting\n");
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 1c1efd386956..22d21af14783 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -475,7 +475,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
475 msleep(edata); 475 msleep(edata);
476 break; 476 break;
477 case IXGBE_DATA_NL: 477 case IXGBE_DATA_NL:
478 hw_dbg(hw, "DATA: \n"); 478 hw_dbg(hw, "DATA:\n");
479 data_offset++; 479 data_offset++;
480 hw->eeprom.ops.read(hw, data_offset++, 480 hw->eeprom.ops.read(hw, data_offset++,
481 &phy_offset); 481 &phy_offset);
@@ -491,7 +491,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
491 break; 491 break;
492 case IXGBE_CONTROL_NL: 492 case IXGBE_CONTROL_NL:
493 data_offset++; 493 data_offset++;
494 hw_dbg(hw, "CONTROL: \n"); 494 hw_dbg(hw, "CONTROL:\n");
495 if (edata == IXGBE_CONTROL_EOL_NL) { 495 if (edata == IXGBE_CONTROL_EOL_NL) {
496 hw_dbg(hw, "EOL\n"); 496 hw_dbg(hw, "EOL\n");
497 end_data = true; 497 end_data = true;
@@ -531,6 +531,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
531 u8 comp_codes_10g = 0; 531 u8 comp_codes_10g = 0;
532 u8 oui_bytes[3] = {0, 0, 0}; 532 u8 oui_bytes[3] = {0, 0, 0};
533 u8 cable_tech = 0; 533 u8 cable_tech = 0;
534 u8 cable_spec = 0;
534 u16 enforce_sfp = 0; 535 u16 enforce_sfp = 0;
535 536
536 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { 537 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
@@ -580,14 +581,30 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
580 else 581 else
581 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 582 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
582 } else if (hw->mac.type == ixgbe_mac_82599EB) { 583 } else if (hw->mac.type == ixgbe_mac_82599EB) {
583 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 584 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
584 if (hw->bus.lan_id == 0) 585 if (hw->bus.lan_id == 0)
585 hw->phy.sfp_type = 586 hw->phy.sfp_type =
586 ixgbe_sfp_type_da_cu_core0; 587 ixgbe_sfp_type_da_cu_core0;
587 else 588 else
588 hw->phy.sfp_type = 589 hw->phy.sfp_type =
589 ixgbe_sfp_type_da_cu_core1; 590 ixgbe_sfp_type_da_cu_core1;
590 else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) 591 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
592 hw->phy.ops.read_i2c_eeprom(
593 hw, IXGBE_SFF_CABLE_SPEC_COMP,
594 &cable_spec);
595 if (cable_spec &
596 IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
597 if (hw->bus.lan_id == 0)
598 hw->phy.sfp_type =
599 ixgbe_sfp_type_da_act_lmt_core0;
600 else
601 hw->phy.sfp_type =
602 ixgbe_sfp_type_da_act_lmt_core1;
603 } else {
604 hw->phy.sfp_type =
605 ixgbe_sfp_type_unknown;
606 }
607 } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
591 if (hw->bus.lan_id == 0) 608 if (hw->bus.lan_id == 0)
592 hw->phy.sfp_type = 609 hw->phy.sfp_type =
593 ixgbe_sfp_type_srlr_core0; 610 ixgbe_sfp_type_srlr_core0;
@@ -637,10 +654,14 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
637 switch (vendor_oui) { 654 switch (vendor_oui) {
638 case IXGBE_SFF_VENDOR_OUI_TYCO: 655 case IXGBE_SFF_VENDOR_OUI_TYCO:
639 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 656 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
640 hw->phy.type = ixgbe_phy_tw_tyco; 657 hw->phy.type =
658 ixgbe_phy_sfp_passive_tyco;
641 break; 659 break;
642 case IXGBE_SFF_VENDOR_OUI_FTL: 660 case IXGBE_SFF_VENDOR_OUI_FTL:
643 hw->phy.type = ixgbe_phy_sfp_ftl; 661 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
662 hw->phy.type = ixgbe_phy_sfp_ftl_active;
663 else
664 hw->phy.type = ixgbe_phy_sfp_ftl;
644 break; 665 break;
645 case IXGBE_SFF_VENDOR_OUI_AVAGO: 666 case IXGBE_SFF_VENDOR_OUI_AVAGO:
646 hw->phy.type = ixgbe_phy_sfp_avago; 667 hw->phy.type = ixgbe_phy_sfp_avago;
@@ -650,7 +671,11 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
650 break; 671 break;
651 default: 672 default:
652 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 673 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
653 hw->phy.type = ixgbe_phy_tw_unknown; 674 hw->phy.type =
675 ixgbe_phy_sfp_passive_unknown;
676 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
677 hw->phy.type =
678 ixgbe_phy_sfp_active_unknown;
654 else 679 else
655 hw->phy.type = ixgbe_phy_sfp_unknown; 680 hw->phy.type = ixgbe_phy_sfp_unknown;
656 break; 681 break;
@@ -658,7 +683,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
658 } 683 }
659 684
660 /* All passive DA cables are supported */ 685 /* All passive DA cables are supported */
661 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { 686 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
687 IXGBE_SFF_DA_ACTIVE_CABLE)) {
662 status = 0; 688 status = 0;
663 goto out; 689 goto out;
664 } 690 }
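
The identification logic above is a small amount of SFF-8472 parsing: byte 8 (IXGBE_SFF_CABLE_TECHNOLOGY) flags a direct-attach cable as passive or active, and for active cables byte 0x3C (IXGBE_SFF_CABLE_SPEC_COMP, added in the ixgbe_phy.h hunk below) says whether the module is of the active-limiting variety, the only active type the 82599 path recognizes here. A minimal standalone restatement of that decision (the helper name and enum are illustrative, not part of the driver; the byte values would come from hw->phy.ops.read_i2c_eeprom() as in the hunk):

	/* Classify a module from SFF-8472 byte 8 and, for active DA, byte 0x3C. */
	enum da_kind { NOT_DA, DA_PASSIVE, DA_ACTIVE_LIMITING, DA_UNKNOWN };

	static enum da_kind sff8472_classify_da(u8 cable_tech, u8 cable_spec)
	{
		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
			return DA_PASSIVE;
		if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
			return (cable_spec & IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) ?
				DA_ACTIVE_LIMITING : DA_UNKNOWN;
		return NOT_DA;
	}

Both passive and active-limiting results end up supported unconditionally, which is why the "All passive DA cables are supported" early-out now tests both cable_tech bits.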
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 9cf5f3b4cc5d..c9c545941407 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -40,9 +40,12 @@
40#define IXGBE_SFF_1GBE_COMP_CODES 0x6 40#define IXGBE_SFF_1GBE_COMP_CODES 0x6
41#define IXGBE_SFF_10GBE_COMP_CODES 0x3 41#define IXGBE_SFF_10GBE_COMP_CODES 0x3
42#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 42#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
43#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
43 44
44/* Bitmasks */ 45/* Bitmasks */
45#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 46#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
47#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
48#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
46#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 49#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
47#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 50#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
48#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 51#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index d4cd20f30199..f6cee94ec8e8 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -48,7 +48,11 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
48 int entries, u16 *hash_list, u32 vf) 48 int entries, u16 *hash_list, u32 vf)
49{ 49{
50 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; 50 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
51 struct ixgbe_hw *hw = &adapter->hw;
51 int i; 52 int i;
53 u32 vector_bit;
54 u32 vector_reg;
55 u32 mta_reg;
52 56
53 /* only so many hash values supported */ 57 /* only so many hash values supported */
54 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); 58 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -68,8 +72,13 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
68 vfinfo->vf_mc_hashes[i] = hash_list[i]; 72 vfinfo->vf_mc_hashes[i] = hash_list[i];
69 } 73 }
70 74
71 /* Flush and reset the mta with the new values */ 75 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
72 ixgbe_set_rx_mode(adapter->netdev); 76 vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
77 vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
78 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
79 mta_reg |= (1 << vector_bit);
80 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
81 }
73 82
74 return 0; 83 return 0;
75} 84}
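
The loop that replaces the ixgbe_set_rx_mode() round trip programs the Multicast Table Array directly: each stored 12-bit hash selects one bit out of the 128 32-bit MTA registers, bits 11:5 picking the register and bits 4:0 the bit within it. A worked restatement of that arithmetic (hypothetical helper, same math as the loop): hash 0x0D35 lands in MTA[0x69], bit 21.

	static void mta_locate(u16 hash, u32 *vector_reg, u32 *vector_bit)
	{
		*vector_reg = (hash >> 5) & 0x7F;	/* 0x0D35 -> 0x69 (105) */
		*vector_bit = hash & 0x1F;		/* 0x0D35 -> 0x15 (21)  */
	}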
@@ -98,38 +107,51 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
98 107
99int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) 108int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
100{ 109{
101 u32 ctrl;
102
103 /* Check if global VLAN already set, if not set it */
104 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
105 if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
106 /* enable VLAN tag insert/strip */
107 ctrl |= IXGBE_VLNCTRL_VFE;
108 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
109 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
110 }
111
112 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
113} 111}
114 112
115 113
116void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf) 114void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
117{ 115{
118 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 116 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
119 vmolr |= (IXGBE_VMOLR_AUPE | 117 vmolr |= (IXGBE_VMOLR_ROMPE |
120 IXGBE_VMOLR_ROMPE |
121 IXGBE_VMOLR_ROPE | 118 IXGBE_VMOLR_ROPE |
122 IXGBE_VMOLR_BAM); 119 IXGBE_VMOLR_BAM);
120 if (aupe)
121 vmolr |= IXGBE_VMOLR_AUPE;
122 else
123 vmolr &= ~IXGBE_VMOLR_AUPE;
123 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); 124 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
124} 125}
125 126
127static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
128{
129 struct ixgbe_hw *hw = &adapter->hw;
130
131 if (vid)
132 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
133 (vid | IXGBE_VMVIR_VLANA_DEFAULT));
134 else
135 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
136}
137
126inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) 138inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
127{ 139{
128 struct ixgbe_hw *hw = &adapter->hw; 140 struct ixgbe_hw *hw = &adapter->hw;
129 141
130 /* reset offloads to defaults */ 142 /* reset offloads to defaults */
131 ixgbe_set_vmolr(hw, vf); 143 if (adapter->vfinfo[vf].pf_vlan) {
132 144 ixgbe_set_vf_vlan(adapter, true,
145 adapter->vfinfo[vf].pf_vlan, vf);
146 ixgbe_set_vmvir(adapter,
147 (adapter->vfinfo[vf].pf_vlan |
148 (adapter->vfinfo[vf].pf_qos <<
149 VLAN_PRIO_SHIFT)), vf);
150 ixgbe_set_vmolr(hw, vf, false);
151 } else {
152 ixgbe_set_vmvir(adapter, 0, vf);
153 ixgbe_set_vmolr(hw, vf, true);
154 }
133 155
134 /* reset multicast table array for vf */ 156 /* reset multicast table array for vf */
135 adapter->vfinfo[vf].num_vf_mc_hashes = 0; 157 adapter->vfinfo[vf].num_vf_mc_hashes = 0;
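
ixgbe_vf_reset_event() now restores either the administratively assigned port VLAN or the VF-controlled default. The VMVIR value programmed for a port VLAN packs the VLAN id in bits 11:0, the 802.1p priority in bits 15:13 (VLAN_PRIO_SHIFT is 13, from <linux/if_vlan.h>), and VLANA_DEFAULT so the tag is inserted on everything the VF transmits; AUPE (accept untagged packets) is cleared at the same time, so a VF with a port VLAN no longer receives untagged frames. A worked example with an illustrative helper, same math as the calls above:

	/* vid 100, qos 3:
	 *   100 | (3 << 13) | IXGBE_VMVIR_VLANA_DEFAULT
	 * = 0x0064 | 0x6000 | 0x40000000
	 * = 0x40006064
	 */
	static inline u32 port_vlan_vmvir(u16 vid, u8 qos)
	{
		return vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
	}

Writing 0 to VMVIR, as the pf_vlan-not-set branch does, disables insertion again.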
@@ -263,10 +285,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
263 case IXGBE_VF_SET_MAC_ADDR: 285 case IXGBE_VF_SET_MAC_ADDR:
264 { 286 {
265 u8 *new_mac = ((u8 *)(&msgbuf[1])); 287 u8 *new_mac = ((u8 *)(&msgbuf[1]));
266 if (is_valid_ether_addr(new_mac)) 288 if (is_valid_ether_addr(new_mac) &&
289 !adapter->vfinfo[vf].pf_set_mac)
267 ixgbe_set_vf_mac(adapter, vf, new_mac); 290 ixgbe_set_vf_mac(adapter, vf, new_mac);
268 else 291 else
269 retval = -1; 292 ixgbe_set_vf_mac(adapter,
293 vf, adapter->vfinfo[vf].vf_mac_addresses);
270 } 294 }
271 break; 295 break;
272 case IXGBE_VF_SET_MULTICAST: 296 case IXGBE_VF_SET_MULTICAST:
@@ -360,3 +384,76 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
360 } 384 }
361} 385}
362 386
387int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
388{
389 struct ixgbe_adapter *adapter = netdev_priv(netdev);
390 if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
391 return -EINVAL;
392 adapter->vfinfo[vf].pf_set_mac = true;
393 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
394 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
395 " change effective.");
396 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
397 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
398 " but the PF device is not up.\n");
399 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
400 " attempting to use the VF device.\n");
401 }
402 return ixgbe_set_vf_mac(adapter, vf, mac);
403}
404
405int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
406{
407 int err = 0;
408 struct ixgbe_adapter *adapter = netdev_priv(netdev);
409
410 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
411 return -EINVAL;
412 if (vlan || qos) {
413 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
414 if (err)
415 goto out;
416 ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
417 ixgbe_set_vmolr(&adapter->hw, vf, false);
418 adapter->vfinfo[vf].pf_vlan = vlan;
419 adapter->vfinfo[vf].pf_qos = qos;
420 dev_info(&adapter->pdev->dev,
421 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
422 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
423 dev_warn(&adapter->pdev->dev,
424 "The VF VLAN has been set,"
425 " but the PF device is not up.\n");
426 dev_warn(&adapter->pdev->dev,
427 "Bring the PF device up before"
428 " attempting to use the VF device.\n");
429 }
430 } else {
431 err = ixgbe_set_vf_vlan(adapter, false,
432 adapter->vfinfo[vf].pf_vlan, vf);
433 ixgbe_set_vmvir(adapter, vlan, vf);
434 ixgbe_set_vmolr(&adapter->hw, vf, true);
435 adapter->vfinfo[vf].pf_vlan = 0;
436 adapter->vfinfo[vf].pf_qos = 0;
437 }
438out:
439 return err;
440}
441
442int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
443{
444 return -EOPNOTSUPP;
445}
446
447int ixgbe_ndo_get_vf_config(struct net_device *netdev,
448 int vf, struct ifla_vf_info *ivi)
449{
450 struct ixgbe_adapter *adapter = netdev_priv(netdev);
451 if (vf >= adapter->num_vfs)
452 return -EINVAL;
453 ivi->vf = vf;
454 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
455 ivi->tx_rate = 0;
456 ivi->vlan = adapter->vfinfo[vf].pf_vlan;
457 ivi->qos = adapter->vfinfo[vf].pf_qos;
458 return 0;
459}
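
These four exported handlers back the rtnetlink per-VF administration interface (the IFLA_VF_* attributes). The matching net_device_ops hookup lives in ixgbe_main.c, which this commit changes but which is not shown in this section; it presumably looks roughly like the following, using the VF callback fields as named in this kernel generation:

	static const struct net_device_ops ixgbe_netdev_ops = {
		/* ... existing callbacks ... */
		.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
		.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
		.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
		.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
	};

From userspace the knobs then surface through iproute2, e.g. "ip link set eth0 vf 0 mac 00:1b:21:00:00:01" or "ip link set eth0 vf 0 vlan 100 qos 3", with "ip link show eth0" reading the state back via ixgbe_ndo_get_vf_config().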
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 51d1106c45a1..184730ecdfb6 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -32,7 +32,7 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
32 int entries, u16 *hash_list, u32 vf); 32 int entries, u16 *hash_list, u32 vf);
33void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); 33void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
34int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf); 34int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
35void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf); 35void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
36void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf); 36void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
37void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf); 37void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
38void ixgbe_msg_task(struct ixgbe_adapter *adapter); 38void ixgbe_msg_task(struct ixgbe_adapter *adapter);
@@ -42,6 +42,12 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); 42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
43void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); 43void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
44void ixgbe_dump_registers(struct ixgbe_adapter *adapter); 44void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
45int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
46int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
47 u8 qos);
48int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
49int ixgbe_ndo_get_vf_config(struct net_device *netdev,
50 int vf, struct ifla_vf_info *ivi);
45 51
46#endif /* _IXGBE_SRIOV_H_ */ 52#endif /* _IXGBE_SRIOV_H_ */
47 53
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 534affcc38ca..39b9be897439 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -73,6 +73,7 @@
73/* NVM Registers */ 73/* NVM Registers */
74#define IXGBE_EEC 0x10010 74#define IXGBE_EEC 0x10010
75#define IXGBE_EERD 0x10014 75#define IXGBE_EERD 0x10014
76#define IXGBE_EEWR 0x10018
76#define IXGBE_FLA 0x1001C 77#define IXGBE_FLA 0x1001C
77#define IXGBE_EEMNGCTL 0x10110 78#define IXGBE_EEMNGCTL 0x10110
78#define IXGBE_EEMNGDATA 0x10114 79#define IXGBE_EEMNGDATA 0x10114
@@ -219,6 +220,7 @@
219#define IXGBE_MTQC 0x08120 220#define IXGBE_MTQC 0x08120
220#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ 221#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
221#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ 222#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
223#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
222#define IXGBE_VT_CTL 0x051B0 224#define IXGBE_VT_CTL 0x051B0
223#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) 225#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
224#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) 226#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
@@ -698,6 +700,7 @@
698#define IXGBE_MREVID 0x11064 700#define IXGBE_MREVID 0x11064
699#define IXGBE_DCA_ID 0x11070 701#define IXGBE_DCA_ID 0x11070
700#define IXGBE_DCA_CTRL 0x11074 702#define IXGBE_DCA_CTRL 0x11074
703#define IXGBE_SWFW_SYNC IXGBE_GSSR
701 704
702/* PCIe registers 82599-specific */ 705/* PCIe registers 82599-specific */
703#define IXGBE_GCR_EXT 0x11050 706#define IXGBE_GCR_EXT 0x11050
@@ -1311,6 +1314,10 @@
1311#define IXGBE_VLVF_ENTRIES 64 1314#define IXGBE_VLVF_ENTRIES 64
1312#define IXGBE_VLVF_VLANID_MASK 0x00000FFF 1315#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
1313 1316
1317/* Per VF Port VLAN insertion rules */
1318#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
1319#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
1320
1314#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ 1321#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
1315 1322
1316/* STATUS Bit Masks */ 1323/* STATUS Bit Masks */
@@ -1458,8 +1465,9 @@
1458#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1465#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
1459#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 1466#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
1460#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ 1467#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
1468#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
1461 1469
1462/* GSSR definitions */ 1470/* SW_FW_SYNC/GSSR definitions */
1463#define IXGBE_GSSR_EEP_SM 0x0001 1471#define IXGBE_GSSR_EEP_SM 0x0001
1464#define IXGBE_GSSR_PHY0_SM 0x0002 1472#define IXGBE_GSSR_PHY0_SM 0x0002
1465#define IXGBE_GSSR_PHY1_SM 0x0004 1473#define IXGBE_GSSR_PHY1_SM 0x0004
@@ -1479,6 +1487,8 @@
1479#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ 1487#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
1480#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ 1488#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
1481#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ 1489#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
1490#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
1491#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
1482/* EEPROM Addressing bits based on type (0-small, 1-large) */ 1492/* EEPROM Addressing bits based on type (0-small, 1-large) */
1483#define IXGBE_EEC_ADDR_SIZE 0x00000400 1493#define IXGBE_EEC_ADDR_SIZE 0x00000400
1484#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ 1494#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
@@ -1534,10 +1544,12 @@
1534#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ 1544#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
1535 1545
1536/* EEPROM Read Register */ 1546/* EEPROM Read Register */
1537#define IXGBE_EEPROM_READ_REG_DATA 16 /* data offset in EEPROM read reg */ 1547#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read/write reg */
1538#define IXGBE_EEPROM_READ_REG_DONE 2 /* Offset to READ done bit */ 1548#define IXGBE_EEPROM_RW_REG_DONE 2 /* Mask for the read/write done bit */
1539#define IXGBE_EEPROM_READ_REG_START 1 /* First bit to start operation */ 1549#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
1540#define IXGBE_EEPROM_READ_ADDR_SHIFT 2 /* Shift to the address bits */ 1550#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
1551#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
1552#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
1541 1553
1542#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 1554#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
1543 1555
@@ -1545,9 +1557,15 @@
1545#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ 1557#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
1546#endif 1558#endif
1547 1559
1548#ifndef IXGBE_EERD_ATTEMPTS 1560#ifndef IXGBE_EERD_EEWR_ATTEMPTS
1549/* Number of 5 microseconds we wait for EERD read to complete */ 1561/* Number of 5 microsecond waits for EERD read and
1550#define IXGBE_EERD_ATTEMPTS 100000 1562 * EEWR write to complete */
1563#define IXGBE_EERD_EEWR_ATTEMPTS 100000
1564#endif
1565
1566#ifndef IXGBE_FLUDONE_ATTEMPTS
1567/* # attempts we wait for flash update to complete */
1568#define IXGBE_FLUDONE_ATTEMPTS 20000
1551#endif 1569#endif
1552 1570
1553#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 1571#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
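
The READ_REG -> RW_REG renames, the new IXGBE_EEWR register, and the shared IXGBE_EERD_EEWR_ATTEMPTS budget all serve an EEWR-based write path in ixgbe_common.c alongside the existing EERD reads. A write is kicked off by composing (data << IXGBE_EEPROM_RW_REG_DATA) | (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) | IXGBE_EEPROM_RW_REG_START into IXGBE_EEWR; both directions then poll the same DONE bit, with IXGBE_NVM_POLL_READ/IXGBE_NVM_POLL_WRITE selecting which register to watch. A sketch of that shared poll (helper name illustrative; the in-tree version in ixgbe_common.c may differ in detail):

	static s32 poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
	{
		u32 i, reg;

		for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
			if (ee_reg == IXGBE_NVM_POLL_READ)
				reg = IXGBE_READ_REG(hw, IXGBE_EERD);
			else
				reg = IXGBE_READ_REG(hw, IXGBE_EEWR);

			if (reg & IXGBE_EEPROM_RW_REG_DONE)
				return 0;	/* operation completed */
			udelay(5);		/* hence "5 microsecond waits" */
		}
		return IXGBE_ERR_EEPROM;
	}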
@@ -2090,6 +2108,7 @@ typedef u32 ixgbe_physical_layer;
2090#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 2108#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
2091#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800 2109#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
2092#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 2110#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
2111#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
2093 2112
2094/* Software ATR hash keys */ 2113/* Software ATR hash keys */
2095#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D 2114#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
@@ -2159,10 +2178,12 @@ enum ixgbe_phy_type {
2159 ixgbe_phy_qt, 2178 ixgbe_phy_qt,
2160 ixgbe_phy_xaui, 2179 ixgbe_phy_xaui,
2161 ixgbe_phy_nl, 2180 ixgbe_phy_nl,
2162 ixgbe_phy_tw_tyco, 2181 ixgbe_phy_sfp_passive_tyco,
2163 ixgbe_phy_tw_unknown, 2182 ixgbe_phy_sfp_passive_unknown,
2183 ixgbe_phy_sfp_active_unknown,
2164 ixgbe_phy_sfp_avago, 2184 ixgbe_phy_sfp_avago,
2165 ixgbe_phy_sfp_ftl, 2185 ixgbe_phy_sfp_ftl,
2186 ixgbe_phy_sfp_ftl_active,
2166 ixgbe_phy_sfp_unknown, 2187 ixgbe_phy_sfp_unknown,
2167 ixgbe_phy_sfp_intel, 2188 ixgbe_phy_sfp_intel,
2168 ixgbe_phy_sfp_unsupported, 2189 ixgbe_phy_sfp_unsupported,
@@ -2190,6 +2211,8 @@ enum ixgbe_sfp_type {
2190 ixgbe_sfp_type_da_cu_core1 = 4, 2211 ixgbe_sfp_type_da_cu_core1 = 4,
2191 ixgbe_sfp_type_srlr_core0 = 5, 2212 ixgbe_sfp_type_srlr_core0 = 5,
2192 ixgbe_sfp_type_srlr_core1 = 6, 2213 ixgbe_sfp_type_srlr_core1 = 6,
2214 ixgbe_sfp_type_da_act_lmt_core0 = 7,
2215 ixgbe_sfp_type_da_act_lmt_core1 = 8,
2193 ixgbe_sfp_type_not_present = 0xFFFE, 2216 ixgbe_sfp_type_not_present = 0xFFFE,
2194 ixgbe_sfp_type_unknown = 0xFFFF 2217 ixgbe_sfp_type_unknown = 0xFFFF
2195}; 2218};
@@ -2263,6 +2286,7 @@ struct ixgbe_addr_filter_info {
2263 u32 mc_addr_in_rar_count; 2286 u32 mc_addr_in_rar_count;
2264 u32 mta_in_use; 2287 u32 mta_in_use;
2265 u32 overflow_promisc; 2288 u32 overflow_promisc;
2289 bool uc_set_promisc;
2266 bool user_set_promisc; 2290 bool user_set_promisc;
2267}; 2291};
2268 2292
@@ -2419,8 +2443,7 @@ struct ixgbe_mac_operations {
2419 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 2443 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
2420 s32 (*init_rx_addrs)(struct ixgbe_hw *); 2444 s32 (*init_rx_addrs)(struct ixgbe_hw *);
2421 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *); 2445 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
2422 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, 2446 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
2423 ixgbe_mc_addr_itr);
2424 s32 (*enable_mc)(struct ixgbe_hw *); 2447 s32 (*enable_mc)(struct ixgbe_hw *);
2425 s32 (*disable_mc)(struct ixgbe_hw *); 2448 s32 (*disable_mc)(struct ixgbe_hw *);
2426 s32 (*clear_vfta)(struct ixgbe_hw *); 2449 s32 (*clear_vfta)(struct ixgbe_hw *);
@@ -2471,6 +2494,7 @@ struct ixgbe_mac_info {
2471 u32 mcft_size; 2494 u32 mcft_size;
2472 u32 vft_size; 2495 u32 vft_size;
2473 u32 num_rar_entries; 2496 u32 num_rar_entries;
2497 u32 rar_highwater;
2474 u32 max_tx_queues; 2498 u32 max_tx_queues;
2475 u32 max_rx_queues; 2499 u32 max_rx_queues;
2476 u32 max_msix_vectors; 2500 u32 max_msix_vectors;
@@ -2577,8 +2601,10 @@ struct ixgbe_info {
2577#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 2601#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
2578#define IXGBE_ERR_SFP_NOT_PRESENT -20 2602#define IXGBE_ERR_SFP_NOT_PRESENT -20
2579#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 2603#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
2604#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
2580#define IXGBE_ERR_FDIR_REINIT_FAILED -23 2605#define IXGBE_ERR_FDIR_REINIT_FAILED -23
2581#define IXGBE_ERR_EEPROM_VERSION -24 2606#define IXGBE_ERR_EEPROM_VERSION -24
2607#define IXGBE_ERR_NO_SPACE -25
2582#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 2608#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
2583 2609
2584#endif /* _IXGBE_TYPE_H_ */ 2610#endif /* _IXGBE_TYPE_H_ */