aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ixgbe
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2011-05-11 03:18:36 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-06-24 01:44:16 -0400
commitc04f6ca84866ef207e009a08e4c34ca241df7aa2 (patch)
tree6888c0e5a354927887a037caa5947d781d6a3597 /drivers/net/ixgbe
parent03ecf91aae757eeb70763a3393227c4597c87b23 (diff)
ixgbe: update perfect filter framework to support retaining filters
This change is meant to update the internal framework of ixgbe so that perfect filters can be stored and tracked via software. Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com> Tested-by: Ross Brattain <ross.b.brattain@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--drivers/net/ixgbe/ixgbe.h18
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c603
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h25
4 files changed, 368 insertions, 280 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index d5674fc8bc02..5ea5b4c08fe0 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -543,16 +543,22 @@ extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
543extern void ixgbe_write_eitr(struct ixgbe_q_vector *); 543extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
544extern int ethtool_ioctl(struct ifreq *ifr); 544extern int ethtool_ioctl(struct ifreq *ifr);
545extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); 545extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
546extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); 546extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
547extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); 547extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
548extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 548extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
549 union ixgbe_atr_hash_dword input, 549 union ixgbe_atr_hash_dword input,
550 union ixgbe_atr_hash_dword common, 550 union ixgbe_atr_hash_dword common,
551 u8 queue); 551 u8 queue);
552extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, 552extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
553 union ixgbe_atr_input *input, 553 union ixgbe_atr_input *input_mask);
554 struct ixgbe_atr_input_masks *input_masks, 554extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
555 u16 soft_id, u8 queue); 555 union ixgbe_atr_input *input,
556 u16 soft_id, u8 queue);
557extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
558 union ixgbe_atr_input *input,
559 u16 soft_id);
560extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
561 union ixgbe_atr_input *mask);
556extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, 562extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
557 struct ixgbe_ring *ring); 563 struct ixgbe_ring *ring);
558extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, 564extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 4a6826bf9338..3b3dd4df4c5c 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1107,115 +1107,87 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1107} 1107}
1108 1108
1109/** 1109/**
1110 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters 1110 * ixgbe_set_fdir_rxpba_82599 - Initialize Flow Director Rx packet buffer
1111 * @hw: pointer to hardware structure 1111 * @hw: pointer to hardware structure
1112 * @pballoc: which mode to allocate filters with 1112 * @pballoc: which mode to allocate filters with
1113 **/ 1113 **/
1114s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) 1114static s32 ixgbe_set_fdir_rxpba_82599(struct ixgbe_hw *hw, const u32 pballoc)
1115{ 1115{
1116 u32 fdirctrl = 0; 1116 u32 fdir_pbsize = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
1117 u32 current_rxpbsize = 0;
1117 int i; 1118 int i;
1118 1119
1119 /* Send interrupt when 64 filters are left */ 1120 /* reserve space for Flow Director filters */
1120 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1121
1122 /* Set the maximum length per hash bucket to 0xA filters */
1123 fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
1124
1125 switch (pballoc) { 1121 switch (pballoc) {
1126 case IXGBE_FDIR_PBALLOC_64K: 1122 case IXGBE_FDIR_PBALLOC_256K:
1127 /* 8k - 1 signature filters */ 1123 fdir_pbsize -= 256 << IXGBE_RXPBSIZE_SHIFT;
1128 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1129 break; 1124 break;
1130 case IXGBE_FDIR_PBALLOC_128K: 1125 case IXGBE_FDIR_PBALLOC_128K:
1131 /* 16k - 1 signature filters */ 1126 fdir_pbsize -= 128 << IXGBE_RXPBSIZE_SHIFT;
1132 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1133 break; 1127 break;
1134 case IXGBE_FDIR_PBALLOC_256K: 1128 case IXGBE_FDIR_PBALLOC_64K:
1135 /* 32k - 1 signature filters */ 1129 fdir_pbsize -= 64 << IXGBE_RXPBSIZE_SHIFT;
1136 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1137 break; 1130 break;
1131 case IXGBE_FDIR_PBALLOC_NONE:
1138 default: 1132 default:
1139 /* bad value */ 1133 return IXGBE_ERR_PARAM;
1140 return IXGBE_ERR_CONFIG;
1141 } 1134 }
1142 1135
1143 /* Move the flexible bytes to use the ethertype - shift 6 words */ 1136 /* determine current RX packet buffer size */
1144 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1137 for (i = 0; i < 8; i++)
1138 current_rxpbsize += IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
1145 1139
1140 /* if there is already room for the filters do nothing */
1141 if (current_rxpbsize <= fdir_pbsize)
1142 return 0;
1146 1143
1147 /* Prime the keys for hashing */ 1144 if (current_rxpbsize > hw->mac.rx_pb_size) {
1148 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); 1145 /*
1149 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); 1146 * if rxpbsize is greater than max then HW max the Rx buffer
1150 1147 * sizes are unconfigured or misconfigured since HW default is
1151 /* 1148 * to give the full buffer to each traffic class resulting in
1152 * Poll init-done after we write the register. Estimated times: 1149 * the total size being buffer size 8x actual size
1153 * 10G: PBALLOC = 11b, timing is 60us 1150 *
1154 * 1G: PBALLOC = 11b, timing is 600us 1151 * This assumes no DCB since the RXPBSIZE registers appear to
1155 * 100M: PBALLOC = 11b, timing is 6ms 1152 * be unconfigured.
1156 * 1153 */
1157 * Multiply these timings by 4 if under full Rx load 1161 * to make room for the filters. As such we take each rxpbsize
1158 * 1155 for (i = 1; i < 8; i++)
1159 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for 1156 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1160 * 1 msec per poll time. If we're at line rate and drop to 100M, then 1157 } else {
1161 * this might not finish in our poll time, but we can live with that 1158 /*
1162 * for now. 1159 * Since the Rx packet buffer appears to have already been
1163 */ 1160 * configured we need to shrink each packet buffer by enough
1164 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1161 * to make room for the filters. As such we take each rxpbsize
1165 IXGBE_WRITE_FLUSH(hw); 1162 * value and multiply it by a fraction representing the size
1166 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1163 * needed over the size we currently have.
1167 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1164 *
1168 IXGBE_FDIRCTRL_INIT_DONE) 1165 * We need to reduce fdir_pbsize and current_rxpbsize to
1169 break; 1166 * 1/1024 of their original values in order to avoid
1170 usleep_range(1000, 2000); 1167 * overflowing the u32 being used to store rxpbsize.
1168 */
1169 fdir_pbsize >>= IXGBE_RXPBSIZE_SHIFT;
1170 current_rxpbsize >>= IXGBE_RXPBSIZE_SHIFT;
1171 for (i = 0; i < 8; i++) {
1172 u32 rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
1173 rxpbsize *= fdir_pbsize;
1174 rxpbsize /= current_rxpbsize;
1175 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
1176 }
1171 } 1177 }
1172 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1173 hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
1174 1178
1175 return 0; 1179 return 0;
1176} 1180}
1177 1181
1178/** 1182/**
1179 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters 1183 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1180 * @hw: pointer to hardware structure 1184 * @hw: pointer to hardware structure
1181 * @pballoc: which mode to allocate filters with 1185 * @fdirctrl: value to write to flow director control register
1182 **/ 1186 **/
1183s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) 1187static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1184{ 1188{
1185 u32 fdirctrl = 0;
1186 int i; 1189 int i;
1187 1190
1188 /* Send interrupt when 64 filters are left */
1189 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1190
1191 /* Initialize the drop queue to Rx queue 127 */
1192 fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
1193
1194 switch (pballoc) {
1195 case IXGBE_FDIR_PBALLOC_64K:
1196 /* 2k - 1 perfect filters */
1197 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1198 break;
1199 case IXGBE_FDIR_PBALLOC_128K:
1200 /* 4k - 1 perfect filters */
1201 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1202 break;
1203 case IXGBE_FDIR_PBALLOC_256K:
1204 /* 8k - 1 perfect filters */
1205 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1206 break;
1207 default:
1208 /* bad value */
1209 return IXGBE_ERR_CONFIG;
1210 }
1211
1212 /* Turn perfect match filtering on */
1213 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
1214 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1215
1216 /* Move the flexible bytes to use the ethertype - shift 6 words */
1217 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1218
1219 /* Prime the keys for hashing */ 1191 /* Prime the keys for hashing */
1220 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); 1192 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1221 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); 1193 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
@@ -1233,10 +1205,6 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1233 * this might not finish in our poll time, but we can live with that 1205 * this might not finish in our poll time, but we can live with that
1234 * for now. 1206 * for now.
1235 */ 1207 */
1236
1237 /* Set the maximum length per hash bucket to 0xA filters */
1238 fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
1239
1240 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1208 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1241 IXGBE_WRITE_FLUSH(hw); 1209 IXGBE_WRITE_FLUSH(hw);
1242 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1210 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
@@ -1245,101 +1213,77 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1245 break; 1213 break;
1246 usleep_range(1000, 2000); 1214 usleep_range(1000, 2000);
1247 } 1215 }
1248 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1249 hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
1250 1216
1251 return 0; 1217 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1218 hw_dbg(hw, "Flow Director poll time exceeded!\n");
1252} 1219}
1253 1220
1254
1255/** 1221/**
1256 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR 1222 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1257 * @stream: input bitstream to compute the hash on 1223 * @hw: pointer to hardware structure
1258 * @key: 32-bit hash key 1224 * @fdirctrl: value to write to flow director control register, initially
1225 * contains just the value of the Rx packet buffer allocation
1259 **/ 1226 **/
1260static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, 1227s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1261 u32 key)
1262{ 1228{
1263 /* 1229 s32 err;
1264 * The algorithm is as follows:
1265 * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
1266 * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
1267 * and A[n] x B[n] is bitwise AND between same length strings
1268 *
1269 * K[n] is 16 bits, defined as:
1270 * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
1271 * for n modulo 32 < 15, K[n] =
1272 * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
1273 *
1274 * S[n] is 16 bits, defined as:
1275 * for n >= 15, S[n] = S[n:n - 15]
1276 * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
1277 *
1278 * To simplify for programming, the algorithm is implemented
1279 * in software this way:
1280 *
1281 * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
1282 *
1283 * for (i = 0; i < 352; i+=32)
1284 * hi_hash_dword[31:0] ^= Stream[(i+31):i];
1285 *
1286 * lo_hash_dword[15:0] ^= Stream[15:0];
1287 * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
1288 * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
1289 *
1290 * hi_hash_dword[31:0] ^= Stream[351:320];
1291 *
1292 * if(key[0])
1293 * hash[15:0] ^= Stream[15:0];
1294 *
1295 * for (i = 0; i < 16; i++) {
1296 * if (key[i])
1297 * hash[15:0] ^= lo_hash_dword[(i+15):i];
1298 * if (key[i + 16])
1299 * hash[15:0] ^= hi_hash_dword[(i+15):i];
1300 * }
1301 *
1302 */
1303 __be32 common_hash_dword = 0;
1304 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1305 u32 hash_result = 0;
1306 u8 i;
1307 1230
1308 /* record the flow_vm_vlan bits as they are a key part to the hash */ 1231 /* Before enabling Flow Director, verify the Rx Packet Buffer size */
1309 flow_vm_vlan = ntohl(atr_input->dword_stream[0]); 1232 err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
1233 if (err)
1234 return err;
1310 1235
1311 /* generate common hash dword */ 1236 /*
1312 for (i = 10; i; i -= 2) 1237 * Continue setup of fdirctrl register bits:
1313 common_hash_dword ^= atr_input->dword_stream[i] ^ 1238 * Move the flexible bytes to use the ethertype - shift 6 words
1314 atr_input->dword_stream[i - 1]; 1239 * Set the maximum length per hash bucket to 0xA filters
1240 * Send interrupt when 64 filters are left
1241 */
1242 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1243 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1244 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1315 1245
1316 hi_hash_dword = ntohl(common_hash_dword); 1246 /* write hashes and fdirctrl register, poll for completion */
1247 ixgbe_fdir_enable_82599(hw, fdirctrl);
1317 1248
1318 /* low dword is word swapped version of common */ 1249 return 0;
1319 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); 1250}
1320 1251
1321 /* apply flow ID/VM pool/VLAN ID bits to hash words */ 1252/**
1322 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); 1253 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1254 * @hw: pointer to hardware structure
1255 * @fdirctrl: value to write to flow director control register, initially
1256 * contains just the value of the Rx packet buffer allocation
1257 **/
1258s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1259{
1260 s32 err;
1323 1261
1324 /* Process bits 0 and 16 */ 1262 /* Before enabling Flow Director, verify the Rx Packet Buffer size */
1325 if (key & 0x0001) hash_result ^= lo_hash_dword; 1263 err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
1326 if (key & 0x00010000) hash_result ^= hi_hash_dword; 1264 if (err)
1265 return err;
1327 1266
1328 /* 1267 /*
1329 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to 1268 * Continue setup of fdirctrl register bits:
1330 * delay this because bit 0 of the stream should not be processed 1269 * Turn perfect match filtering on
1331 * so we do not add the vlan until after bit 0 was processed 1270 * Report hash in RSS field of Rx wb descriptor
1271 * Initialize the drop queue
1272 * Move the flexible bytes to use the ethertype - shift 6 words
1273 * Set the maximum length per hash bucket to 0xA filters
1274 * Send interrupt when 64 (0x4 * 16) filters are left
1332 */ 1275 */
1333 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); 1276 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1277 IXGBE_FDIRCTRL_REPORT_STATUS |
1278 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1279 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1280 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1281 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1334 1282
1283 /* write hashes and fdirctrl register, poll for completion */
1284 ixgbe_fdir_enable_82599(hw, fdirctrl);
1335 1285
1336 /* process the remaining 30 bits in the key 2 bits at a time */ 1286 return 0;
1337 for (i = 15; i; i-- ) {
1338 if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
1339 if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
1340 }
1341
1342 return hash_result & IXGBE_ATR_HASH_MASK;
1343} 1287}
1344 1288
1345/* 1289/*
@@ -1476,7 +1420,6 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1476 */ 1420 */
1477 fdirhashcmd = (u64)fdircmd << 32; 1421 fdirhashcmd = (u64)fdircmd << 32;
1478 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); 1422 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1479
1480 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); 1423 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1481 1424
1482 hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); 1425 hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
@@ -1484,6 +1427,101 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1484 return 0; 1427 return 0;
1485} 1428}
1486 1429
1430#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1431do { \
1432 u32 n = (_n); \
1433 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1434 bucket_hash ^= lo_hash_dword >> n; \
1435 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1436 bucket_hash ^= hi_hash_dword >> n; \
1437} while (0);
1438
1439/**
1440 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1441 * @atr_input: input bitstream to compute the hash on
1442 * @input_mask: mask for the input bitstream
1443 *
1444 * This function serves two main purposes. First it applies the input_mask
1445 * to the atr_input resulting in a cleaned up atr_input data stream.
1446 * Secondly it computes the hash and stores it in the bkt_hash field at
1447 * the end of the input byte stream. This way it will be available for
1448 * future use without needing to recompute the hash.
1449 **/
1450void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1451 union ixgbe_atr_input *input_mask)
1452{
1453
1454 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1455 u32 bucket_hash = 0;
1456
1457 /* Apply masks to input data */
1458 input->dword_stream[0] &= input_mask->dword_stream[0];
1459 input->dword_stream[1] &= input_mask->dword_stream[1];
1460 input->dword_stream[2] &= input_mask->dword_stream[2];
1461 input->dword_stream[3] &= input_mask->dword_stream[3];
1462 input->dword_stream[4] &= input_mask->dword_stream[4];
1463 input->dword_stream[5] &= input_mask->dword_stream[5];
1464 input->dword_stream[6] &= input_mask->dword_stream[6];
1465 input->dword_stream[7] &= input_mask->dword_stream[7];
1466 input->dword_stream[8] &= input_mask->dword_stream[8];
1467 input->dword_stream[9] &= input_mask->dword_stream[9];
1468 input->dword_stream[10] &= input_mask->dword_stream[10];
1469
1470 /* record the flow_vm_vlan bits as they are a key part to the hash */
1471 flow_vm_vlan = ntohl(input->dword_stream[0]);
1472
1473 /* generate common hash dword */
1474 hi_hash_dword = ntohl(input->dword_stream[1] ^
1475 input->dword_stream[2] ^
1476 input->dword_stream[3] ^
1477 input->dword_stream[4] ^
1478 input->dword_stream[5] ^
1479 input->dword_stream[6] ^
1480 input->dword_stream[7] ^
1481 input->dword_stream[8] ^
1482 input->dword_stream[9] ^
1483 input->dword_stream[10]);
1484
1485 /* low dword is word swapped version of common */
1486 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1487
1488 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1489 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1490
1491 /* Process bits 0 and 16 */
1492 IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1493
1494 /*
1495 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1496 * delay this because bit 0 of the stream should not be processed
1497 * so we do not add the vlan until after bit 0 was processed
1498 */
1499 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1500
1501 /* Process remaining 30 bits of the key */
1502 IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
1503 IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
1504 IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1505 IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1506 IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1507 IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1508 IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1509 IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1510 IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1511 IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1512 IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1513 IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1514 IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1515 IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1516 IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
1517
1518 /*
1519 * Limit hash to 13 bits since max bucket count is 8K.
1520 * Store result at the end of the input stream.
1521 */
1522 input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1523}
1524
1487/** 1525/**
1488 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks 1526 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1489 * @input_mask: mask to be bit swapped 1527 * @input_mask: mask to be bit swapped
@@ -1493,11 +1531,11 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1493 * generate a correctly swapped value we need to bit swap the mask and that 1531 * generate a correctly swapped value we need to bit swap the mask and that
1494 * is what is accomplished by this function. 1532 * is what is accomplished by this function.
1495 **/ 1533 **/
1496static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks) 1534static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1497{ 1535{
1498 u32 mask = ntohs(input_masks->dst_port_mask); 1536 u32 mask = ntohs(input_mask->formatted.dst_port);
1499 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; 1537 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1500 mask |= ntohs(input_masks->src_port_mask); 1538 mask |= ntohs(input_mask->formatted.src_port);
1501 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); 1539 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1502 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); 1540 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1503 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); 1541 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
@@ -1519,52 +1557,14 @@ static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
1519 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value))) 1557 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
1520 1558
1521#define IXGBE_STORE_AS_BE16(_value) \ 1559#define IXGBE_STORE_AS_BE16(_value) \
1522 (((u16)(_value) >> 8) | ((u16)(_value) << 8)) 1560 ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1523 1561
1524/** 1562s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1525 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter 1563 union ixgbe_atr_input *input_mask)
1526 * @hw: pointer to hardware structure
1527 * @input: input bitstream
1528 * @input_masks: bitwise masks for relevant fields
1529 * @soft_id: software index into the silicon hash tables for filter storage
1530 * @queue: queue index to direct traffic to
1531 *
1532 * Note that the caller to this function must lock before calling, since the
1533 * hardware writes must be protected from one another.
1534 **/
1535s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1536 union ixgbe_atr_input *input,
1537 struct ixgbe_atr_input_masks *input_masks,
1538 u16 soft_id, u8 queue)
1539{ 1564{
1540 u32 fdirhash; 1565 /* mask IPv6 since it is currently not supported */
1541 u32 fdircmd; 1566 u32 fdirm = IXGBE_FDIRM_DIPv6;
1542 u32 fdirport, fdirtcpm; 1567 u32 fdirtcpm;
1543 u32 fdirvlan;
1544 /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
1545 u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
1546 IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
1547
1548 /*
1549 * Check flow_type formatting, and bail out before we touch the hardware
1550 * if there's a configuration issue
1551 */
1552 switch (input->formatted.flow_type) {
1553 case IXGBE_ATR_FLOW_TYPE_IPV4:
1554 /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
1555 fdirm |= IXGBE_FDIRM_L4P;
1556 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1557 if (input_masks->dst_port_mask || input_masks->src_port_mask) {
1558 hw_dbg(hw, " Error on src/dst port mask\n");
1559 return IXGBE_ERR_CONFIG;
1560 }
1561 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1562 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1563 break;
1564 default:
1565 hw_dbg(hw, " Error on flow type input\n");
1566 return IXGBE_ERR_CONFIG;
1567 }
1568 1568
1569 /* 1569 /*
1570 * Program the relevant mask registers. If src/dst_port or src/dst_addr 1570 * Program the relevant mask registers. If src/dst_port or src/dst_addr
@@ -1576,41 +1576,71 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1576 * point in time. 1576 * point in time.
1577 */ 1577 */
1578 1578
1579 /* Program FDIRM */ 1579 /* verify bucket hash is cleared on hash generation */
1580 switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) { 1580 if (input_mask->formatted.bkt_hash)
1581 case 0xEFFF: 1581 hw_dbg(hw, " bucket hash should always be 0 in mask\n");
1582 /* Unmask VLAN ID - bit 0 and fall through to unmask prio */ 1582
1583 fdirm &= ~IXGBE_FDIRM_VLANID; 1583 /* Program FDIRM and verify partial masks */
1584 case 0xE000: 1584 switch (input_mask->formatted.vm_pool & 0x7F) {
1585 /* Unmask VLAN prio - bit 1 */ 1585 case 0x0:
1586 fdirm &= ~IXGBE_FDIRM_VLANP; 1586 fdirm |= IXGBE_FDIRM_POOL;
1587 case 0x7F:
1587 break; 1588 break;
1588 case 0x0FFF: 1589 default:
1589 /* Unmask VLAN ID - bit 0 */ 1590 hw_dbg(hw, " Error on vm pool mask\n");
1590 fdirm &= ~IXGBE_FDIRM_VLANID; 1591 return IXGBE_ERR_CONFIG;
1592 }
1593
1594 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1595 case 0x0:
1596 fdirm |= IXGBE_FDIRM_L4P;
1597 if (input_mask->formatted.dst_port ||
1598 input_mask->formatted.src_port) {
1599 hw_dbg(hw, " Error on src/dst port mask\n");
1600 return IXGBE_ERR_CONFIG;
1601 }
1602 case IXGBE_ATR_L4TYPE_MASK:
1591 break; 1603 break;
1604 default:
1605 hw_dbg(hw, " Error on flow type mask\n");
1606 return IXGBE_ERR_CONFIG;
1607 }
1608
1609 switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
1592 case 0x0000: 1610 case 0x0000:
1593 /* do nothing, vlans already masked */ 1611 /* mask VLAN ID, fall through to mask VLAN priority */
1612 fdirm |= IXGBE_FDIRM_VLANID;
1613 case 0x0FFF:
1614 /* mask VLAN priority */
1615 fdirm |= IXGBE_FDIRM_VLANP;
1616 break;
1617 case 0xE000:
1618 /* mask VLAN ID only, fall through */
1619 fdirm |= IXGBE_FDIRM_VLANID;
1620 case 0xEFFF:
1621 /* no VLAN fields masked */
1594 break; 1622 break;
1595 default: 1623 default:
1596 hw_dbg(hw, " Error on VLAN mask\n"); 1624 hw_dbg(hw, " Error on VLAN mask\n");
1597 return IXGBE_ERR_CONFIG; 1625 return IXGBE_ERR_CONFIG;
1598 } 1626 }
1599 1627
1600 if (input_masks->flex_mask & 0xFFFF) { 1628 switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1601 if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) { 1629 case 0x0000:
1602 hw_dbg(hw, " Error on flexible byte mask\n"); 1630 /* Mask Flex Bytes, fall through */
1603 return IXGBE_ERR_CONFIG; 1631 fdirm |= IXGBE_FDIRM_FLEX;
1604 } 1632 case 0xFFFF:
1605 /* Unmask Flex Bytes - bit 4 */ 1633 break;
1606 fdirm &= ~IXGBE_FDIRM_FLEX; 1634 default:
1635 hw_dbg(hw, " Error on flexible byte mask\n");
1636 return IXGBE_ERR_CONFIG;
1607 } 1637 }
1608 1638
1609 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ 1639 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1610 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); 1640 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1611 1641
1612 /* store the TCP/UDP port masks, bit reversed from port layout */ 1642 /* store the TCP/UDP port masks, bit reversed from port layout */
1613 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks); 1643 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1614 1644
1615 /* write both the same so that UDP and TCP use the same mask */ 1645 /* write both the same so that UDP and TCP use the same mask */
1616 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); 1646 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
@@ -1618,24 +1648,32 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1618 1648
1619 /* store source and destination IP masks (big-endian) */ 1649 /* store source and destination IP masks (big-endian) */
1620 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 1650 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1621 ~input_masks->src_ip_mask[0]); 1651 ~input_mask->formatted.src_ip[0]);
1622 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 1652 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1623 ~input_masks->dst_ip_mask[0]); 1653 ~input_mask->formatted.dst_ip[0]);
1624 1654
1625 /* Apply masks to input data */ 1655 return 0;
1626 input->formatted.vlan_id &= input_masks->vlan_id_mask; 1656}
1627 input->formatted.flex_bytes &= input_masks->flex_mask;
1628 input->formatted.src_port &= input_masks->src_port_mask;
1629 input->formatted.dst_port &= input_masks->dst_port_mask;
1630 input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
1631 input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
1632 1657
1633 /* record vlan (little-endian) and flex_bytes(big-endian) */ 1658s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1634 fdirvlan = 1659 union ixgbe_atr_input *input,
1635 IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes)); 1660 u16 soft_id, u8 queue)
1636 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; 1661{
1637 fdirvlan |= ntohs(input->formatted.vlan_id); 1662 u32 fdirport, fdirvlan, fdirhash, fdircmd;
1638 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); 1663
1664 /* currently IPv6 is not supported, must be programmed with 0 */
1665 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1666 input->formatted.src_ip[0]);
1667 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1668 input->formatted.src_ip[1]);
1669 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1670 input->formatted.src_ip[2]);
1671
1672 /* record the source address (big-endian) */
1673 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1674
1675 /* record the first 32 bits of the destination address (big-endian) */
1676 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1639 1677
1640 /* record source and destination port (little-endian)*/ 1678 /* record source and destination port (little-endian)*/
1641 fdirport = ntohs(input->formatted.dst_port); 1679 fdirport = ntohs(input->formatted.dst_port);
@@ -1643,29 +1681,80 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1643 fdirport |= ntohs(input->formatted.src_port); 1681 fdirport |= ntohs(input->formatted.src_port);
1644 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); 1682 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1645 1683
1646 /* record the first 32 bits of the destination address (big-endian) */ 1684 /* record vlan (little-endian) and flex_bytes(big-endian) */
1647 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); 1685 fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1686 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1687 fdirvlan |= ntohs(input->formatted.vlan_id);
1688 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1648 1689
1649 /* record the source address (big-endian) */ 1690 /* configure FDIRHASH register */
1650 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); 1691 fdirhash = input->formatted.bkt_hash;
1692 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1693 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1694
1695 /*
1696 * flush all previous writes to make certain registers are
1697 * programmed prior to issuing the command
1698 */
1699 IXGBE_WRITE_FLUSH(hw);
1651 1700
1652 /* configure FDIRCMD register */ 1701 /* configure FDIRCMD register */
1653 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1702 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1654 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; 1703 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1704 if (queue == IXGBE_FDIR_DROP_QUEUE)
1705 fdircmd |= IXGBE_FDIRCMD_DROP;
1655 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; 1706 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1656 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1707 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1708 fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1657 1709
1658 /* we only want the bucket hash so drop the upper 16 bits */
1659 fdirhash = ixgbe_atr_compute_hash_82599(input,
1660 IXGBE_ATR_BUCKET_HASH_KEY);
1661 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1662
1663 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1664 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); 1710 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1665 1711
1666 return 0; 1712 return 0;
1667} 1713}
1668 1714
1715s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1716 union ixgbe_atr_input *input,
1717 u16 soft_id)
1718{
1719 u32 fdirhash;
1720 u32 fdircmd = 0;
1721 u32 retry_count;
1722 s32 err = 0;
1723
1724 /* configure FDIRHASH register */
1725 fdirhash = input->formatted.bkt_hash;
1726 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1727 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1728
1729 /* flush hash to HW */
1730 IXGBE_WRITE_FLUSH(hw);
1731
1732 /* Query if filter is present */
1733 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1734
1735 for (retry_count = 10; retry_count; retry_count--) {
1736 /* allow 10us for query to process */
1737 udelay(10);
1738 /* verify query completed successfully */
1739 fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1740 if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1741 break;
1742 }
1743
1744 if (!retry_count)
1745 err = IXGBE_ERR_FDIR_REINIT_FAILED;
1746
1747 /* if filter exists in hardware then remove it */
1748 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1749 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1750 IXGBE_WRITE_FLUSH(hw);
1751 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1752 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1753 }
1754
1755 return err;
1756}
1757
1669/** 1758/**
1670 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register 1759 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1671 * @hw: pointer to hardware structure 1760 * @hw: pointer to hardware structure
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 5483b9c3e2c0..e177b5d061fe 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -5130,7 +5130,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
5130 adapter->atr_sample_rate = 20; 5130 adapter->atr_sample_rate = 20;
5131 adapter->ring_feature[RING_F_FDIR].indices = 5131 adapter->ring_feature[RING_F_FDIR].indices =
5132 IXGBE_MAX_FDIR_INDICES; 5132 IXGBE_MAX_FDIR_INDICES;
5133 adapter->fdir_pballoc = 0; 5133 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
5134#ifdef IXGBE_FCOE 5134#ifdef IXGBE_FCOE
5135 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; 5135 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
5136 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 5136 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 9a499a61d141..8b1abd47056f 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2056,9 +2056,10 @@ enum {
2056#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4)) 2056#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
2057 2057
2058enum ixgbe_fdir_pballoc_type { 2058enum ixgbe_fdir_pballoc_type {
2059 IXGBE_FDIR_PBALLOC_64K = 0, 2059 IXGBE_FDIR_PBALLOC_NONE = 0,
2060 IXGBE_FDIR_PBALLOC_128K, 2060 IXGBE_FDIR_PBALLOC_64K = 1,
2061 IXGBE_FDIR_PBALLOC_256K, 2061 IXGBE_FDIR_PBALLOC_128K = 2,
2062 IXGBE_FDIR_PBALLOC_256K = 3,
2062}; 2063};
2063#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16 2064#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16
2064 2065
@@ -2112,7 +2113,7 @@ enum ixgbe_fdir_pballoc_type {
2112#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 2113#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
2113#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 2114#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
2114#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 2115#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
2115#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH 0x00000007 2116#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004
2116#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 2117#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
2117#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 2118#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
2118#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 2119#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
@@ -2131,6 +2132,8 @@ enum ixgbe_fdir_pballoc_type {
2131#define IXGBE_FDIR_INIT_DONE_POLL 10 2132#define IXGBE_FDIR_INIT_DONE_POLL 10
2132#define IXGBE_FDIRCMD_CMD_POLL 10 2133#define IXGBE_FDIRCMD_CMD_POLL 10
2133 2134
2135#define IXGBE_FDIR_DROP_QUEUE 127
2136
2134/* Manageablility Host Interface defines */ 2137/* Manageablility Host Interface defines */
2135#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ 2138#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
2136#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ 2139#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
@@ -2350,7 +2353,7 @@ union ixgbe_atr_input {
2350 * src_port - 2 bytes 2353 * src_port - 2 bytes
2351 * dst_port - 2 bytes 2354 * dst_port - 2 bytes
2352 * flex_bytes - 2 bytes 2355 * flex_bytes - 2 bytes
2353 * rsvd0 - 2 bytes - space reserved must be 0. 2356 * bkt_hash - 2 bytes
2354 */ 2357 */
2355 struct { 2358 struct {
2356 u8 vm_pool; 2359 u8 vm_pool;
@@ -2361,7 +2364,7 @@ union ixgbe_atr_input {
2361 __be16 src_port; 2364 __be16 src_port;
2362 __be16 dst_port; 2365 __be16 dst_port;
2363 __be16 flex_bytes; 2366 __be16 flex_bytes;
2364 __be16 rsvd0; 2367 __be16 bkt_hash;
2365 } formatted; 2368 } formatted;
2366 __be32 dword_stream[11]; 2369 __be32 dword_stream[11];
2367}; 2370};
@@ -2382,16 +2385,6 @@ union ixgbe_atr_hash_dword {
2382 __be32 dword; 2385 __be32 dword;
2383}; 2386};
2384 2387
2385struct ixgbe_atr_input_masks {
2386 __be16 rsvd0;
2387 __be16 vlan_id_mask;
2388 __be32 dst_ip_mask[4];
2389 __be32 src_ip_mask[4];
2390 __be16 src_port_mask;
2391 __be16 dst_port_mask;
2392 __be16 flex_mask;
2393};
2394
2395enum ixgbe_eeprom_type { 2388enum ixgbe_eeprom_type {
2396 ixgbe_eeprom_uninitialized = 0, 2389 ixgbe_eeprom_uninitialized = 0,
2397 ixgbe_eeprom_spi, 2390 ixgbe_eeprom_spi,