author     Emil Tantilov <emil.s.tantilov@intel.com>    2011-02-17 06:34:58 -0500
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>   2011-03-03 06:11:39 -0500
commit     c700f4e6f55c42c9aeacf365bd178f97625e00df (patch)
tree       ded1be112befab38829f5cf23f53655098b60b51 /drivers/net/ixgbe/ixgbe_common.c
parent     26d6899ba775ed056bd73107e3f4427ff9247f75 (diff)
ixgbe: Bounds checking for set_rar, clear_rar, set_vmdq, clear_vmdq
This change makes out-of-bounds index requests to these functions return
IXGBE_ERR_INVALID_ARGUMENT, instead of silently returning 0 (set_vmdq,
clear_vmdq) or IXGBE_ERR_RAR_INDEX (set_rar, clear_rar).
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
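The patch applies the same shape to all four functions: validate the RAR index
against hw->mac.num_rar_entries before touching any registers, and bail out
early with an error code rather than reporting success. The stand-alone sketch
below mirrors that early-return pattern so a caller can tell a rejected index
apart from a completed write; demo_hw, demo_set_rar and
DEMO_ERR_INVALID_ARGUMENT are hypothetical stand-ins for illustration, not the
ixgbe driver's own types or API.

/* Minimal userspace sketch of the bounds-check pattern this patch introduces;
 * all names below are illustrative stand-ins, not driver code. */
#include <stdio.h>

#define DEMO_ERR_INVALID_ARGUMENT (-32)  /* stand-in for IXGBE_ERR_INVALID_ARGUMENT */

struct demo_hw {
	unsigned int num_rar_entries;    /* mirrors hw->mac.num_rar_entries */
	unsigned int rar_low[16];        /* fake RAL registers */
};

/* Mirrors the reworked set_rar flow: reject an out-of-range index up front
 * instead of skipping the write and still returning 0. */
static int demo_set_rar(struct demo_hw *hw, unsigned int index, unsigned int val)
{
	if (index >= hw->num_rar_entries) {
		fprintf(stderr, "RAR index %u is out of range.\n", index);
		return DEMO_ERR_INVALID_ARGUMENT;
	}

	hw->rar_low[index] = val;
	return 0;
}

int main(void)
{
	struct demo_hw hw = { .num_rar_entries = 16 };

	/* An in-range index succeeds; an out-of-range index now reports an error. */
	printf("set_rar(3)  -> %d\n", demo_set_rar(&hw, 3, 0xabcd));
	printf("set_rar(99) -> %d\n", demo_set_rar(&hw, 99, 0xabcd));
	return 0;
}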
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_common.c')
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c | 153
1 file changed, 78 insertions(+), 75 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 7e3bb559f42d..882a35092024 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1239,37 +1239,37 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
 	u32 rar_low, rar_high;
 	u32 rar_entries = hw->mac.num_rar_entries;
 
+	/* Make sure we are using a valid rar index range */
+	if (index >= rar_entries) {
+		hw_dbg(hw, "RAR index %d is out of range.\n", index);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
 	/* setup VMDq pool selection before this RAR gets enabled */
 	hw->mac.ops.set_vmdq(hw, index, vmdq);
 
-	/* Make sure we are using a valid rar index range */
-	if (index < rar_entries) {
-		/*
-		 * HW expects these in little endian so we reverse the byte
-		 * order from network order (big endian) to little endian
-		 */
-		rar_low = ((u32)addr[0] |
-			   ((u32)addr[1] << 8) |
-			   ((u32)addr[2] << 16) |
-			   ((u32)addr[3] << 24));
-		/*
-		 * Some parts put the VMDq setting in the extra RAH bits,
-		 * so save everything except the lower 16 bits that hold part
-		 * of the address and the address valid bit.
-		 */
-		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-		rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-		rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+	/*
+	 * HW expects these in little endian so we reverse the byte
+	 * order from network order (big endian) to little endian
+	 */
+	rar_low = ((u32)addr[0] |
+		   ((u32)addr[1] << 8) |
+		   ((u32)addr[2] << 16) |
+		   ((u32)addr[3] << 24));
+	/*
+	 * Some parts put the VMDq setting in the extra RAH bits,
+	 * so save everything except the lower 16 bits that hold part
+	 * of the address and the address valid bit.
+	 */
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+	rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
 
-		if (enable_addr != 0)
-			rar_high |= IXGBE_RAH_AV;
+	if (enable_addr != 0)
+		rar_high |= IXGBE_RAH_AV;
 
-		IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
-		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-	} else {
-		hw_dbg(hw, "RAR index %d is out of range.\n", index);
-		return IXGBE_ERR_RAR_INDEX;
-	}
+	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 
 	return 0;
 }
@@ -1287,22 +1287,22 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
 	u32 rar_entries = hw->mac.num_rar_entries;
 
 	/* Make sure we are using a valid rar index range */
-	if (index < rar_entries) {
-		/*
-		 * Some parts put the VMDq setting in the extra RAH bits,
-		 * so save everything except the lower 16 bits that hold part
-		 * of the address and the address valid bit.
-		 */
-		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-		rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-
-		IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-	} else {
+	if (index >= rar_entries) {
 		hw_dbg(hw, "RAR index %d is out of range.\n", index);
-		return IXGBE_ERR_RAR_INDEX;
+		return IXGBE_ERR_INVALID_ARGUMENT;
 	}
 
+	/*
+	 * Some parts put the VMDq setting in the extra RAH bits,
+	 * so save everything except the lower 16 bits that hold part
+	 * of the address and the address valid bit.
+	 */
+	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
 	/* clear VMDq pool/queue selection for this RAR */
 	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
 
@@ -2468,37 +2468,38 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 	u32 mpsar_lo, mpsar_hi;
 	u32 rar_entries = hw->mac.num_rar_entries;
 
-	if (rar < rar_entries) {
-		mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-		mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
 
-		if (!mpsar_lo && !mpsar_hi)
-			goto done;
+	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
 
-		if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
-			if (mpsar_lo) {
-				IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
-				mpsar_lo = 0;
-			}
-			if (mpsar_hi) {
-				IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
-				mpsar_hi = 0;
-			}
-		} else if (vmdq < 32) {
-			mpsar_lo &= ~(1 << vmdq);
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
-		} else {
-			mpsar_hi &= ~(1 << (vmdq - 32));
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
-		}
+	if (!mpsar_lo && !mpsar_hi)
+		goto done;
 
-		/* was that the last pool using this rar? */
-		if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
-			hw->mac.ops.clear_rar(hw, rar);
+	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+		if (mpsar_lo) {
+			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+			mpsar_lo = 0;
+		}
+		if (mpsar_hi) {
+			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+			mpsar_hi = 0;
+		}
+	} else if (vmdq < 32) {
+		mpsar_lo &= ~(1 << vmdq);
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
 	} else {
-		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+		mpsar_hi &= ~(1 << (vmdq - 32));
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
 	}
 
+	/* was that the last pool using this rar? */
+	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+		hw->mac.ops.clear_rar(hw, rar);
 done:
 	return 0;
 }
@@ -2514,18 +2515,20 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 	u32 mpsar;
 	u32 rar_entries = hw->mac.num_rar_entries;
 
-	if (rar < rar_entries) {
-		if (vmdq < 32) {
-			mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-			mpsar |= 1 << vmdq;
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
-		} else {
-			mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-			mpsar |= 1 << (vmdq - 32);
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
-		}
-	} else {
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
 		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	if (vmdq < 32) {
+		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+		mpsar |= 1 << vmdq;
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+	} else {
+		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+		mpsar |= 1 << (vmdq - 32);
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
 	}
 	return 0;
 }