-rw-r--r--   drivers/net/ethernet/brocade/bna/bfa_ioc.c       | 650
-rw-r--r--   drivers/net/ethernet/brocade/bna/bfa_ioc.h       |   8
-rw-r--r--   drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c    |  40
-rw-r--r--   drivers/net/ethernet/brocade/bna/bfi.h           |  33
-rw-r--r--   drivers/net/ethernet/brocade/bna/bfi_enet.h      |   3
-rw-r--r--   drivers/net/ethernet/brocade/bna/bna.h           |  24
-rw-r--r--   drivers/net/ethernet/brocade/bna/bna_enet.c      |  58
-rw-r--r--   drivers/net/ethernet/brocade/bna/bna_hw_defs.h   |   4
-rw-r--r--   drivers/net/ethernet/brocade/bna/bna_tx_rx.c     | 251
-rw-r--r--   drivers/net/ethernet/brocade/bna/bna_types.h     |  57
-rw-r--r--   drivers/net/ethernet/brocade/bna/bnad.c          | 554
-rw-r--r--   drivers/net/ethernet/brocade/bna/bnad.h          |  25
-rw-r--r--   drivers/net/ethernet/brocade/bna/bnad_ethtool.c  |   1
-rw-r--r--   drivers/net/ethernet/brocade/bna/cna.h           |   4

14 files changed, 1420 insertions(+), 292 deletions(-)
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 6f3cac060f29..537bba14f913 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -22,6 +22,14 @@
22 | 22 | ||
23 | /* IOC local definitions */ | 23 | /* IOC local definitions */ |
24 | 24 | ||
25 | #define bfa_ioc_state_disabled(__sm) \ | ||
26 | (((__sm) == BFI_IOC_UNINIT) || \ | ||
27 | ((__sm) == BFI_IOC_INITING) || \ | ||
28 | ((__sm) == BFI_IOC_HWINIT) || \ | ||
29 | ((__sm) == BFI_IOC_DISABLED) || \ | ||
30 | ((__sm) == BFI_IOC_FAIL) || \ | ||
31 | ((__sm) == BFI_IOC_CFG_DISABLED)) | ||
32 | |||
25 | /* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */ | 33 | /* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */ |
26 | 34 | ||
27 | #define bfa_ioc_firmware_lock(__ioc) \ | 35 | #define bfa_ioc_firmware_lock(__ioc) \ |
@@ -42,6 +50,14 @@
42 | ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) | 50 | ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) |
43 | #define bfa_ioc_sync_complete(__ioc) \ | 51 | #define bfa_ioc_sync_complete(__ioc) \ |
44 | ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) | 52 | ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) |
53 | #define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \ | ||
54 | ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate)) | ||
55 | #define bfa_ioc_get_cur_ioc_fwstate(__ioc) \ | ||
56 | ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc)) | ||
57 | #define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \ | ||
58 | ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate)) | ||
59 | #define bfa_ioc_get_alt_ioc_fwstate(__ioc) \ | ||
60 | ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc)) | ||
45 | 61 | ||
46 | #define bfa_ioc_mbox_cmd_pending(__ioc) \ | 62 | #define bfa_ioc_mbox_cmd_pending(__ioc) \ |
47 | (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ | 63 | (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ |
@@ -76,8 +92,8 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
76 | static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); | 92 | static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); |
77 | static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc); | 93 | static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc); |
78 | static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); | 94 | static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); |
79 | static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, | 95 | static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc, |
80 | u32 boot_param); | 96 | enum bfi_fwboot_type boot_type, u32 boot_param); |
81 | static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); | 97 | static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); |
82 | static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, | 98 | static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, |
83 | char *serial_num); | 99 | char *serial_num); |
@@ -860,7 +876,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
860 | */ | 876 | */ |
861 | 877 | ||
862 | case IOCPF_E_TIMEOUT: | 878 | case IOCPF_E_TIMEOUT: |
863 | writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); | 879 | bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); |
864 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); | 880 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); |
865 | break; | 881 | break; |
866 | 882 | ||
@@ -949,7 +965,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
949 | case IOCPF_E_SEMLOCKED: | 965 | case IOCPF_E_SEMLOCKED: |
950 | bfa_ioc_notify_fail(ioc); | 966 | bfa_ioc_notify_fail(ioc); |
951 | bfa_ioc_sync_leave(ioc); | 967 | bfa_ioc_sync_leave(ioc); |
952 | writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); | 968 | bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); |
953 | bfa_nw_ioc_hw_sem_release(ioc); | 969 | bfa_nw_ioc_hw_sem_release(ioc); |
954 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); | 970 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); |
955 | break; | 971 | break; |
@@ -1031,7 +1047,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1031 | bfa_ioc_notify_fail(ioc); | 1047 | bfa_ioc_notify_fail(ioc); |
1032 | if (!iocpf->auto_recover) { | 1048 | if (!iocpf->auto_recover) { |
1033 | bfa_ioc_sync_leave(ioc); | 1049 | bfa_ioc_sync_leave(ioc); |
1034 | writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); | 1050 | bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); |
1035 | bfa_nw_ioc_hw_sem_release(ioc); | 1051 | bfa_nw_ioc_hw_sem_release(ioc); |
1036 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); | 1052 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); |
1037 | } else { | 1053 | } else { |
@@ -1131,6 +1147,25 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg)
1131 | writel(1, sem_reg); | 1147 | writel(1, sem_reg); |
1132 | } | 1148 | } |
1133 | 1149 | ||
1150 | /* Invalidate fwver signature */ | ||
1151 | enum bfa_status | ||
1152 | bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc) | ||
1153 | { | ||
1154 | u32 pgnum, pgoff; | ||
1155 | u32 loff = 0; | ||
1156 | enum bfi_ioc_state ioc_fwstate; | ||
1157 | |||
1158 | ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); | ||
1159 | if (!bfa_ioc_state_disabled(ioc_fwstate)) | ||
1160 | return BFA_STATUS_ADAPTER_ENABLED; | ||
1161 | |||
1162 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); | ||
1163 | pgoff = PSS_SMEM_PGOFF(loff); | ||
1164 | writel(pgnum, ioc->ioc_regs.host_page_num_fn); | ||
1165 | writel(BFI_IOC_FW_INV_SIGN, ioc->ioc_regs.smem_page_start + loff); | ||
1166 | return BFA_STATUS_OK; | ||
1167 | } | ||
1168 | |||
1134 | /* Clear fwver hdr */ | 1169 | /* Clear fwver hdr */ |
1135 | static void | 1170 | static void |
1136 | bfa_ioc_fwver_clear(struct bfa_ioc *ioc) | 1171 | bfa_ioc_fwver_clear(struct bfa_ioc *ioc) |
@@ -1162,7 +1197,7 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
1162 | r32 = readl(ioc->ioc_regs.ioc_init_sem_reg); | 1197 | r32 = readl(ioc->ioc_regs.ioc_init_sem_reg); |
1163 | } | 1198 | } |
1164 | 1199 | ||
1165 | fwstate = readl(ioc->ioc_regs.ioc_fwstate); | 1200 | fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); |
1166 | if (fwstate == BFI_IOC_UNINIT) { | 1201 | if (fwstate == BFI_IOC_UNINIT) { |
1167 | writel(1, ioc->ioc_regs.ioc_init_sem_reg); | 1202 | writel(1, ioc->ioc_regs.ioc_init_sem_reg); |
1168 | return; | 1203 | return; |
@@ -1176,8 +1211,8 @@ bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
1176 | } | 1211 | } |
1177 | 1212 | ||
1178 | bfa_ioc_fwver_clear(ioc); | 1213 | bfa_ioc_fwver_clear(ioc); |
1179 | writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); | 1214 | bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT); |
1180 | writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); | 1215 | bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT); |
1181 | 1216 | ||
1182 | /* | 1217 | /* |
1183 | * Try to lock and then unlock the semaphore. | 1218 | * Try to lock and then unlock the semaphore. |
@@ -1309,22 +1344,510 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1309 | } | 1344 | } |
1310 | } | 1345 | } |
1311 | 1346 | ||
1312 | /* Returns TRUE if same. */ | 1347 | static bool |
1348 | bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1, | ||
1349 | struct bfi_ioc_image_hdr *fwhdr_2) | ||
1350 | { | ||
1351 | int i; | ||
1352 | |||
1353 | for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { | ||
1354 | if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i]) | ||
1355 | return false; | ||
1356 | } | ||
1357 | |||
1358 | return true; | ||
1359 | } | ||
1360 | |||
1361 | /* Returns TRUE if the signature, major, minor, and maint versions match. ||
1362 | * If patch, phase, and build are also the same, the MD5 checksums must match too. ||
1363 | */ ||
1364 | static bool | ||
1365 | bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr, | ||
1366 | struct bfi_ioc_image_hdr *fwhdr_to_cmp) | ||
1367 | { | ||
1368 | if (drv_fwhdr->signature != fwhdr_to_cmp->signature) | ||
1369 | return false; | ||
1370 | if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major) | ||
1371 | return false; | ||
1372 | if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor) | ||
1373 | return false; | ||
1374 | if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint) | ||
1375 | return false; | ||
1376 | if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch && | ||
1377 | drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase && | ||
1378 | drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) | ||
1379 | return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp); | ||
1380 | |||
1381 | return true; | ||
1382 | } | ||
1383 | |||
1384 | static bool | ||
1385 | bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr) | ||
1386 | { | ||
1387 | if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF) | ||
1388 | return false; | ||
1389 | |||
1390 | return true; | ||
1391 | } | ||
1392 | |||
1393 | static bool | ||
1394 | fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr) | ||
1395 | { | ||
1396 | if (fwhdr->fwver.phase == 0 && | ||
1397 | fwhdr->fwver.build == 0) | ||
1398 | return false; | ||
1399 | |||
1400 | return true; | ||
1401 | } | ||
1402 | |||
1403 | /* Compares fwhdr_to_cmp against base_fwhdr: incompatible, old, same, or better. */ ||
1404 | static enum bfi_ioc_img_ver_cmp | ||
1405 | bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr, | ||
1406 | struct bfi_ioc_image_hdr *fwhdr_to_cmp) | ||
1407 | { | ||
1408 | if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false) | ||
1409 | return BFI_IOC_IMG_VER_INCOMP; | ||
1410 | |||
1411 | if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch) | ||
1412 | return BFI_IOC_IMG_VER_BETTER; | ||
1413 | else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch) | ||
1414 | return BFI_IOC_IMG_VER_OLD; | ||
1415 | |||
1416 | /* GA takes priority over internal builds of the same patch stream. | ||
1417 | * At this point the major, minor, maint, and patch numbers are the same. ||
1418 | */ | ||
1419 | if (fwhdr_is_ga(base_fwhdr) == true) | ||
1420 | if (fwhdr_is_ga(fwhdr_to_cmp)) | ||
1421 | return BFI_IOC_IMG_VER_SAME; | ||
1422 | else | ||
1423 | return BFI_IOC_IMG_VER_OLD; | ||
1424 | else | ||
1425 | if (fwhdr_is_ga(fwhdr_to_cmp)) | ||
1426 | return BFI_IOC_IMG_VER_BETTER; | ||
1427 | |||
1428 | if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase) | ||
1429 | return BFI_IOC_IMG_VER_BETTER; | ||
1430 | else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase) | ||
1431 | return BFI_IOC_IMG_VER_OLD; | ||
1432 | |||
1433 | if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build) | ||
1434 | return BFI_IOC_IMG_VER_BETTER; | ||
1435 | else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build) | ||
1436 | return BFI_IOC_IMG_VER_OLD; | ||
1437 | |||
1438 | /* All version numbers are equal. ||
1439 | * The MD5 check was done as part of the compatibility check. ||
1440 | */ | ||
1441 | return BFI_IOC_IMG_VER_SAME; | ||
1442 | } | ||
1443 | |||
1444 | /* register definitions */ | ||
1445 | #define FLI_CMD_REG 0x0001d000 | ||
1446 | #define FLI_WRDATA_REG 0x0001d00c | ||
1447 | #define FLI_RDDATA_REG 0x0001d010 | ||
1448 | #define FLI_ADDR_REG 0x0001d004 | ||
1449 | #define FLI_DEV_STATUS_REG 0x0001d014 | ||
1450 | |||
1451 | #define BFA_FLASH_FIFO_SIZE 128 /* fifo size */ | ||
1452 | #define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */ | ||
1453 | #define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */ | ||
1454 | #define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */ | ||
1455 | |||
1456 | #define NFC_STATE_RUNNING 0x20000001 | ||
1457 | #define NFC_STATE_PAUSED 0x00004560 | ||
1458 | #define NFC_VER_VALID 0x147 | ||
1459 | |||
1460 | enum bfa_flash_cmd { | ||
1461 | BFA_FLASH_FAST_READ = 0x0b, /* fast read */ | ||
1462 | BFA_FLASH_WRITE_ENABLE = 0x06, /* write enable */ | ||
1463 | BFA_FLASH_SECTOR_ERASE = 0xd8, /* sector erase */ | ||
1464 | BFA_FLASH_WRITE = 0x02, /* write */ | ||
1465 | BFA_FLASH_READ_STATUS = 0x05, /* read status */ | ||
1466 | }; | ||
1467 | |||
1468 | /* hardware error definition */ | ||
1469 | enum bfa_flash_err { | ||
1470 | BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */ | ||
1471 | BFA_FLASH_UNINIT = -2, /*!< flash not initialized */ | ||
1472 | BFA_FLASH_BAD = -3, /*!< flash bad */ | ||
1473 | BFA_FLASH_BUSY = -4, /*!< flash busy */ | ||
1474 | BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */ | ||
1475 | BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */ | ||
1476 | BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */ | ||
1477 | BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */ | ||
1478 | BFA_FLASH_ERR_LEN = -9, /*!< invalid length */ | ||
1479 | }; | ||
1480 | |||
1481 | /* flash command register data structure */ | ||
1482 | union bfa_flash_cmd_reg { | ||
1483 | struct { | ||
1484 | #ifdef __BIG_ENDIAN | ||
1485 | u32 act:1; | ||
1486 | u32 rsv:1; | ||
1487 | u32 write_cnt:9; | ||
1488 | u32 read_cnt:9; | ||
1489 | u32 addr_cnt:4; | ||
1490 | u32 cmd:8; | ||
1491 | #else | ||
1492 | u32 cmd:8; | ||
1493 | u32 addr_cnt:4; | ||
1494 | u32 read_cnt:9; | ||
1495 | u32 write_cnt:9; | ||
1496 | u32 rsv:1; | ||
1497 | u32 act:1; | ||
1498 | #endif | ||
1499 | } r; | ||
1500 | u32 i; | ||
1501 | }; | ||
1502 | |||
1503 | /* flash device status register data structure */ | ||
1504 | union bfa_flash_dev_status_reg { | ||
1505 | struct { | ||
1506 | #ifdef __BIG_ENDIAN | ||
1507 | u32 rsv:21; | ||
1508 | u32 fifo_cnt:6; | ||
1509 | u32 busy:1; | ||
1510 | u32 init_status:1; | ||
1511 | u32 present:1; | ||
1512 | u32 bad:1; | ||
1513 | u32 good:1; | ||
1514 | #else | ||
1515 | u32 good:1; | ||
1516 | u32 bad:1; | ||
1517 | u32 present:1; | ||
1518 | u32 init_status:1; | ||
1519 | u32 busy:1; | ||
1520 | u32 fifo_cnt:6; | ||
1521 | u32 rsv:21; | ||
1522 | #endif | ||
1523 | } r; | ||
1524 | u32 i; | ||
1525 | }; | ||
1526 | |||
1527 | /* flash address register data structure */ | ||
1528 | union bfa_flash_addr_reg { | ||
1529 | struct { | ||
1530 | #ifdef __BIG_ENDIAN | ||
1531 | u32 addr:24; | ||
1532 | u32 dummy:8; | ||
1533 | #else | ||
1534 | u32 dummy:8; | ||
1535 | u32 addr:24; | ||
1536 | #endif | ||
1537 | } r; | ||
1538 | u32 i; | ||
1539 | }; | ||
1540 | |||
1541 | /* Flash raw private functions */ | ||
1542 | static void | ||
1543 | bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt, | ||
1544 | u8 rd_cnt, u8 ad_cnt, u8 op) | ||
1545 | { | ||
1546 | union bfa_flash_cmd_reg cmd; | ||
1547 | |||
1548 | cmd.i = 0; | ||
1549 | cmd.r.act = 1; | ||
1550 | cmd.r.write_cnt = wr_cnt; | ||
1551 | cmd.r.read_cnt = rd_cnt; | ||
1552 | cmd.r.addr_cnt = ad_cnt; | ||
1553 | cmd.r.cmd = op; | ||
1554 | writel(cmd.i, (pci_bar + FLI_CMD_REG)); | ||
1555 | } | ||
1556 | |||
1557 | static void | ||
1558 | bfa_flash_set_addr(void __iomem *pci_bar, u32 address) | ||
1559 | { | ||
1560 | union bfa_flash_addr_reg addr; | ||
1561 | |||
1562 | addr.r.addr = address & 0x00ffffff; | ||
1563 | addr.r.dummy = 0; | ||
1564 | writel(addr.i, (pci_bar + FLI_ADDR_REG)); | ||
1565 | } | ||
1566 | |||
1567 | static int | ||
1568 | bfa_flash_cmd_act_check(void __iomem *pci_bar) | ||
1569 | { | ||
1570 | union bfa_flash_cmd_reg cmd; | ||
1571 | |||
1572 | cmd.i = readl(pci_bar + FLI_CMD_REG); | ||
1573 | |||
1574 | if (cmd.r.act) | ||
1575 | return BFA_FLASH_ERR_CMD_ACT; | ||
1576 | |||
1577 | return 0; | ||
1578 | } | ||
1579 | |||
1580 | /* Flush FLI data fifo. */ | ||
1581 | static u32 | ||
1582 | bfa_flash_fifo_flush(void __iomem *pci_bar) | ||
1583 | { | ||
1584 | u32 i; | ||
1585 | u32 t; | ||
1586 | union bfa_flash_dev_status_reg dev_status; | ||
1587 | |||
1588 | dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); | ||
1589 | |||
1590 | if (!dev_status.r.fifo_cnt) | ||
1591 | return 0; | ||
1592 | |||
1593 | /* fifo counter in terms of words */ | ||
1594 | for (i = 0; i < dev_status.r.fifo_cnt; i++) | ||
1595 | t = readl(pci_bar + FLI_RDDATA_REG); | ||
1596 | |||
1597 | /* Check the device status. It may take some time. */ | ||
1598 | for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { | ||
1599 | dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); | ||
1600 | if (!dev_status.r.fifo_cnt) | ||
1601 | break; | ||
1602 | } | ||
1603 | |||
1604 | if (dev_status.r.fifo_cnt) | ||
1605 | return BFA_FLASH_ERR_FIFO_CNT; | ||
1606 | |||
1607 | return 0; | ||
1608 | } | ||
1609 | |||
1610 | /* Read flash status. */ | ||
1611 | static u32 | ||
1612 | bfa_flash_status_read(void __iomem *pci_bar) | ||
1613 | { | ||
1614 | union bfa_flash_dev_status_reg dev_status; | ||
1615 | u32 status; | ||
1616 | u32 ret_status; | ||
1617 | int i; | ||
1618 | |||
1619 | status = bfa_flash_fifo_flush(pci_bar); | ||
1620 | if (status < 0) | ||
1621 | return status; | ||
1622 | |||
1623 | bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS); | ||
1624 | |||
1625 | for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { | ||
1626 | status = bfa_flash_cmd_act_check(pci_bar); | ||
1627 | if (!status) | ||
1628 | break; | ||
1629 | } | ||
1630 | |||
1631 | if (status) | ||
1632 | return status; | ||
1633 | |||
1634 | dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); | ||
1635 | if (!dev_status.r.fifo_cnt) | ||
1636 | return BFA_FLASH_BUSY; | ||
1637 | |||
1638 | ret_status = readl(pci_bar + FLI_RDDATA_REG); | ||
1639 | ret_status >>= 24; | ||
1640 | |||
1641 | status = bfa_flash_fifo_flush(pci_bar); | ||
1642 | if (status < 0) | ||
1643 | return status; | ||
1644 | |||
1645 | return ret_status; | ||
1646 | } | ||
1647 | |||
1648 | /* Start flash read operation. */ | ||
1649 | static u32 | ||
1650 | bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, | ||
1651 | char *buf) | ||
1652 | { | ||
1653 | u32 status; | ||
1654 | |||
1655 | /* len must be a multiple of 4 and must not exceed the fifo size */ ||
1656 | if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) | ||
1657 | return BFA_FLASH_ERR_LEN; | ||
1658 | |||
1659 | /* check status */ | ||
1660 | status = bfa_flash_status_read(pci_bar); | ||
1661 | if (status == BFA_FLASH_BUSY) | ||
1662 | status = bfa_flash_status_read(pci_bar); | ||
1663 | |||
1664 | if (status < 0) | ||
1665 | return status; | ||
1666 | |||
1667 | /* check if write-in-progress bit is cleared */ | ||
1668 | if (status & BFA_FLASH_WIP_MASK) | ||
1669 | return BFA_FLASH_ERR_WIP; | ||
1670 | |||
1671 | bfa_flash_set_addr(pci_bar, offset); | ||
1672 | |||
1673 | bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ); | ||
1674 | |||
1675 | return 0; | ||
1676 | } | ||
1677 | |||
1678 | /* Check flash read operation. */ | ||
1679 | static u32 | ||
1680 | bfa_flash_read_check(void __iomem *pci_bar) | ||
1681 | { | ||
1682 | if (bfa_flash_cmd_act_check(pci_bar)) | ||
1683 | return 1; | ||
1684 | |||
1685 | return 0; | ||
1686 | } | ||
1687 | |||
1688 | /* End flash read operation. */ | ||
1689 | static void | ||
1690 | bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf) | ||
1691 | { | ||
1692 | u32 i; | ||
1693 | |||
1694 | /* read data fifo up to 32 words */ | ||
1695 | for (i = 0; i < len; i += 4) { | ||
1696 | u32 w = readl(pci_bar + FLI_RDDATA_REG); | ||
1697 | *((u32 *)(buf + i)) = swab32(w); | ||
1698 | } | ||
1699 | |||
1700 | bfa_flash_fifo_flush(pci_bar); | ||
1701 | } | ||
1702 | |||
1703 | /* Perform flash raw read. */ | ||
1704 | |||
1705 | #define FLASH_BLOCKING_OP_MAX 500 | ||
1706 | #define FLASH_SEM_LOCK_REG 0x18820 | ||
1707 | |||
1708 | static int | ||
1709 | bfa_raw_sem_get(void __iomem *bar) | ||
1710 | { | ||
1711 | int locked; | ||
1712 | |||
1713 | locked = readl((bar + FLASH_SEM_LOCK_REG)); | ||
1714 | |||
1715 | return !locked; | ||
1716 | } | ||
1717 | |||
1718 | static enum bfa_status | ||
1719 | bfa_flash_sem_get(void __iomem *bar) | ||
1720 | { | ||
1721 | u32 n = FLASH_BLOCKING_OP_MAX; | ||
1722 | |||
1723 | while (!bfa_raw_sem_get(bar)) { | ||
1724 | if (--n <= 0) | ||
1725 | return BFA_STATUS_BADFLASH; | ||
1726 | udelay(10000); | ||
1727 | } | ||
1728 | return BFA_STATUS_OK; | ||
1729 | } | ||
1730 | |||
1731 | static void | ||
1732 | bfa_flash_sem_put(void __iomem *bar) | ||
1733 | { | ||
1734 | writel(0, (bar + FLASH_SEM_LOCK_REG)); | ||
1735 | } | ||
1736 | |||
1737 | static enum bfa_status | ||
1738 | bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, | ||
1739 | u32 len) | ||
1740 | { | ||
1741 | u32 n, status; | ||
1742 | u32 off, l, s, residue, fifo_sz; | ||
1743 | |||
1744 | residue = len; | ||
1745 | off = 0; | ||
1746 | fifo_sz = BFA_FLASH_FIFO_SIZE; | ||
1747 | status = bfa_flash_sem_get(pci_bar); | ||
1748 | if (status != BFA_STATUS_OK) | ||
1749 | return status; | ||
1750 | |||
1751 | while (residue) { | ||
1752 | s = offset + off; | ||
1753 | n = s / fifo_sz; | ||
1754 | l = (n + 1) * fifo_sz - s; | ||
1755 | if (l > residue) | ||
1756 | l = residue; | ||
1757 | |||
1758 | status = bfa_flash_read_start(pci_bar, offset + off, l, | ||
1759 | &buf[off]); | ||
1760 | if (status < 0) { | ||
1761 | bfa_flash_sem_put(pci_bar); | ||
1762 | return BFA_STATUS_FAILED; | ||
1763 | } | ||
1764 | |||
1765 | n = BFA_FLASH_BLOCKING_OP_MAX; | ||
1766 | while (bfa_flash_read_check(pci_bar)) { | ||
1767 | if (--n <= 0) { | ||
1768 | bfa_flash_sem_put(pci_bar); | ||
1769 | return BFA_STATUS_FAILED; | ||
1770 | } | ||
1771 | } | ||
1772 | |||
1773 | bfa_flash_read_end(pci_bar, l, &buf[off]); | ||
1774 | |||
1775 | residue -= l; | ||
1776 | off += l; | ||
1777 | } | ||
1778 | bfa_flash_sem_put(pci_bar); | ||
1779 | |||
1780 | return BFA_STATUS_OK; | ||
1781 | } | ||
1782 | |||
1783 | u32 | ||
1784 | bfa_nw_ioc_flash_img_get_size(struct bfa_ioc *ioc) | ||
1785 | { | ||
1786 | return BFI_FLASH_IMAGE_SZ/sizeof(u32); | ||
1787 | } | ||
1788 | |||
1789 | #define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */ | ||
1790 | |||
1791 | enum bfa_status | ||
1792 | bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off, | ||
1793 | u32 *fwimg) | ||
1794 | { | ||
1795 | return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva, | ||
1796 | BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)), | ||
1797 | (char *)fwimg, BFI_FLASH_CHUNK_SZ); | ||
1798 | } | ||
1799 | |||
1800 | static enum bfi_ioc_img_ver_cmp | ||
1801 | bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc, | ||
1802 | struct bfi_ioc_image_hdr *base_fwhdr) | ||
1803 | { | ||
1804 | struct bfi_ioc_image_hdr *flash_fwhdr; | ||
1805 | enum bfa_status status; | ||
1806 | u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS]; | ||
1807 | |||
1808 | status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg); | ||
1809 | if (status != BFA_STATUS_OK) | ||
1810 | return BFI_IOC_IMG_VER_INCOMP; | ||
1811 | |||
1812 | flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg; | ||
1813 | if (bfa_ioc_flash_fwver_valid(flash_fwhdr)) | ||
1814 | return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr); | ||
1815 | else | ||
1816 | return BFI_IOC_IMG_VER_INCOMP; | ||
1817 | } | ||
1818 | |||
1819 | /** | ||
1820 | * Returns TRUE if driver is willing to work with current smem f/w version. | ||
1821 | */ | ||
1313 | bool | 1822 | bool |
1314 | bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) | 1823 | bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) |
1315 | { | 1824 | { |
1316 | struct bfi_ioc_image_hdr *drv_fwhdr; | 1825 | struct bfi_ioc_image_hdr *drv_fwhdr; |
1317 | int i; | 1826 | enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp; |
1318 | 1827 | ||
1319 | drv_fwhdr = (struct bfi_ioc_image_hdr *) | 1828 | drv_fwhdr = (struct bfi_ioc_image_hdr *) |
1320 | bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); | 1829 | bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); |
1321 | 1830 | ||
1322 | for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { | 1831 | /* If smem is incompatible or old, driver should not work with it. */ |
1323 | if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) | 1832 | drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr); |
1324 | return false; | 1833 | if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP || |
1834 | drv_smem_cmp == BFI_IOC_IMG_VER_OLD) { | ||
1835 | return false; | ||
1325 | } | 1836 | } |
1326 | 1837 | ||
1327 | return true; | 1838 | /* If flash has a better f/w than smem, do not work with smem.
1839 | * If smem f/w equals flash f/w, work with smem, since smem f/w is not old or incompatible. ||
1840 | * If flash f/w is old or incompatible, work with smem iff smem f/w equals drv f/w. ||
1841 | */ | ||
1842 | smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr); | ||
1843 | |||
1844 | if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) | ||
1845 | return false; | ||
1846 | else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) | ||
1847 | return true; | ||
1848 | else | ||
1849 | return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ? | ||
1850 | true : false; | ||
1328 | } | 1851 | } |
1329 | 1852 | ||
1330 | /* Return true if current running version is valid. Firmware signature and | 1853 | /* Return true if current running version is valid. Firmware signature and |
@@ -1333,15 +1856,9 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1333 | static bool | 1856 | static bool |
1334 | bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env) | 1857 | bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env) |
1335 | { | 1858 | { |
1336 | struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr; | 1859 | struct bfi_ioc_image_hdr fwhdr; |
1337 | 1860 | ||
1338 | bfa_nw_ioc_fwver_get(ioc, &fwhdr); | 1861 | bfa_nw_ioc_fwver_get(ioc, &fwhdr); |
1339 | drv_fwhdr = (struct bfi_ioc_image_hdr *) | ||
1340 | bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); | ||
1341 | |||
1342 | if (fwhdr.signature != drv_fwhdr->signature) | ||
1343 | return false; | ||
1344 | |||
1345 | if (swab32(fwhdr.bootenv) != boot_env) | 1862 | if (swab32(fwhdr.bootenv) != boot_env) |
1346 | return false; | 1863 | return false; |
1347 | 1864 | ||
@@ -1366,7 +1883,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1366 | bool fwvalid; | 1883 | bool fwvalid; |
1367 | u32 boot_env; | 1884 | u32 boot_env; |
1368 | 1885 | ||
1369 | ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); | 1886 | ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); |
1370 | 1887 | ||
1371 | if (force) | 1888 | if (force) |
1372 | ioc_fwstate = BFI_IOC_UNINIT; | 1889 | ioc_fwstate = BFI_IOC_UNINIT; |
@@ -1380,8 +1897,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1380 | false : bfa_ioc_fwver_valid(ioc, boot_env); | 1897 | false : bfa_ioc_fwver_valid(ioc, boot_env); |
1381 | 1898 | ||
1382 | if (!fwvalid) { | 1899 | if (!fwvalid) { |
1383 | bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env); | 1900 | if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) == |
1384 | bfa_ioc_poll_fwinit(ioc); | 1901 | BFA_STATUS_OK) |
1902 | bfa_ioc_poll_fwinit(ioc); | ||
1903 | |||
1385 | return; | 1904 | return; |
1386 | } | 1905 | } |
1387 | 1906 | ||
@@ -1411,8 +1930,9 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1411 | /** | 1930 | /** |
1412 | * Initialize the h/w for any other states. | 1931 | * Initialize the h/w for any other states. |
1413 | */ | 1932 | */ |
1414 | bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env); | 1933 | if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) == |
1415 | bfa_ioc_poll_fwinit(ioc); | 1934 | BFA_STATUS_OK) |
1935 | bfa_ioc_poll_fwinit(ioc); | ||
1416 | } | 1936 | } |
1417 | 1937 | ||
1418 | void | 1938 | void |
@@ -1517,7 +2037,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1517 | } | 2037 | } |
1518 | 2038 | ||
1519 | /* Initiate a full firmware download. */ | 2039 | /* Initiate a full firmware download. */ |
1520 | static void | 2040 | static enum bfa_status |
1521 | bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, | 2041 | bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, |
1522 | u32 boot_env) | 2042 | u32 boot_env) |
1523 | { | 2043 | { |
@@ -1527,18 +2047,47 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1527 | u32 chunkno = 0; | 2047 | u32 chunkno = 0; |
1528 | u32 i; | 2048 | u32 i; |
1529 | u32 asicmode; | 2049 | u32 asicmode; |
2050 | u32 fwimg_size; | ||
2051 | u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS]; | ||
2052 | enum bfa_status status; | ||
2053 | |||
2054 | if (boot_env == BFI_FWBOOT_ENV_OS && | ||
2055 | boot_type == BFI_FWBOOT_TYPE_FLASH) { | ||
2056 | fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32); | ||
2057 | |||
2058 | status = bfa_nw_ioc_flash_img_get_chnk(ioc, | ||
2059 | BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf); | ||
2060 | if (status != BFA_STATUS_OK) | ||
2061 | return status; | ||
1530 | 2062 | ||
1531 | fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno); | 2063 | fwimg = fwimg_buf; |
2064 | } else { | ||
2065 | fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); | ||
2066 | fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), | ||
2067 | BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); | ||
2068 | } | ||
1532 | 2069 | ||
1533 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); | 2070 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); |
1534 | 2071 | ||
1535 | writel(pgnum, ioc->ioc_regs.host_page_num_fn); | 2072 | writel(pgnum, ioc->ioc_regs.host_page_num_fn); |
1536 | 2073 | ||
1537 | for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) { | 2074 | for (i = 0; i < fwimg_size; i++) { |
1538 | if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { | 2075 | if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { |
1539 | chunkno = BFA_IOC_FLASH_CHUNK_NO(i); | 2076 | chunkno = BFA_IOC_FLASH_CHUNK_NO(i); |
1540 | fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), | 2077 | if (boot_env == BFI_FWBOOT_ENV_OS && |
2078 | boot_type == BFI_FWBOOT_TYPE_FLASH) { | ||
2079 | status = bfa_nw_ioc_flash_img_get_chnk(ioc, | ||
2080 | BFA_IOC_FLASH_CHUNK_ADDR(chunkno), | ||
2081 | fwimg_buf); | ||
2082 | if (status != BFA_STATUS_OK) | ||
2083 | return status; | ||
2084 | |||
2085 | fwimg = fwimg_buf; | ||
2086 | } else { | ||
2087 | fwimg = bfa_cb_image_get_chunk( | ||
2088 | bfa_ioc_asic_gen(ioc), | ||
1541 | BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); | 2089 | BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); |
2090 | } | ||
1542 | } | 2091 | } |
1543 | 2092 | ||
1544 | /** | 2093 | /** |
@@ -1566,6 +2115,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1566 | /* | 2115 | /* |
1567 | * Set boot type, env and device mode at the end. | 2116 | * Set boot type, env and device mode at the end. |
1568 | */ | 2117 | */ |
2118 | if (boot_env == BFI_FWBOOT_ENV_OS && | ||
2119 | boot_type == BFI_FWBOOT_TYPE_FLASH) { | ||
2120 | boot_type = BFI_FWBOOT_TYPE_NORMAL; | ||
2121 | } | ||
1569 | asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, | 2122 | asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, |
1570 | ioc->port0_mode, ioc->port1_mode); | 2123 | ioc->port0_mode, ioc->port1_mode); |
1571 | writel(asicmode, ((ioc->ioc_regs.smem_page_start) | 2124 | writel(asicmode, ((ioc->ioc_regs.smem_page_start) |
@@ -1574,6 +2127,7 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1574 | + (BFI_FWBOOT_TYPE_OFF))); | 2127 | + (BFI_FWBOOT_TYPE_OFF))); |
1575 | writel(boot_env, ((ioc->ioc_regs.smem_page_start) | 2128 | writel(boot_env, ((ioc->ioc_regs.smem_page_start) |
1576 | + (BFI_FWBOOT_ENV_OFF))); | 2129 | + (BFI_FWBOOT_ENV_OFF))); |
2130 | return BFA_STATUS_OK; | ||
1577 | } | 2131 | } |
1578 | 2132 | ||
1579 | static void | 2133 | static void |
@@ -1846,29 +2400,47 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
1846 | /* Interface used by diag module to do firmware boot with memory test | 2400 | /* Interface used by diag module to do firmware boot with memory test |
1847 | * as the entry vector. | 2401 | * as the entry vector. |
1848 | */ | 2402 | */ |
1849 | static void | 2403 | static enum bfa_status |
1850 | bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, | 2404 | bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, |
1851 | u32 boot_env) | 2405 | u32 boot_env) |
1852 | { | 2406 | { |
2407 | struct bfi_ioc_image_hdr *drv_fwhdr; | ||
2408 | enum bfa_status status; | ||
1853 | bfa_ioc_stats(ioc, ioc_boots); | 2409 | bfa_ioc_stats(ioc, ioc_boots); |
1854 | 2410 | ||
1855 | if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) | 2411 | if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) |
1856 | return; | 2412 | return BFA_STATUS_FAILED; |
2413 | if (boot_env == BFI_FWBOOT_ENV_OS && | ||
2414 | boot_type == BFI_FWBOOT_TYPE_NORMAL) { | ||
2415 | drv_fwhdr = (struct bfi_ioc_image_hdr *) | ||
2416 | bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); | ||
2417 | /* Work with Flash iff flash f/w is better than driver f/w. | ||
2418 | * Otherwise push the driver's firmware. ||
2419 | */ | ||
2420 | if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) == | ||
2421 | BFI_IOC_IMG_VER_BETTER) | ||
2422 | boot_type = BFI_FWBOOT_TYPE_FLASH; | ||
2423 | } | ||
1857 | 2424 | ||
1858 | /** | 2425 | /** |
1859 | * Initialize IOC state of all functions on a chip reset. | 2426 | * Initialize IOC state of all functions on a chip reset. |
1860 | */ | 2427 | */ |
1861 | if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { | 2428 | if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { |
1862 | writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate); | 2429 | bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST); |
1863 | writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate); | 2430 | bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST); |
1864 | } else { | 2431 | } else { |
1865 | writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate); | 2432 | bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING); |
1866 | writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate); | 2433 | bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING); |
1867 | } | 2434 | } |
1868 | 2435 | ||
1869 | bfa_ioc_msgflush(ioc); | 2436 | bfa_ioc_msgflush(ioc); |
1870 | bfa_ioc_download_fw(ioc, boot_type, boot_env); | 2437 | status = bfa_ioc_download_fw(ioc, boot_type, boot_env); |
1871 | bfa_ioc_lpu_start(ioc); | 2438 | if (status == BFA_STATUS_OK) |
2439 | bfa_ioc_lpu_start(ioc); | ||
2440 | else | ||
2441 | bfa_nw_iocpf_timeout(ioc); | ||
2442 | |||
2443 | return status; | ||
1872 | } | 2444 | } |
1873 | 2445 | ||
1874 | /* Enable/disable IOC failure auto recovery. */ | 2446 | /* Enable/disable IOC failure auto recovery. */ |
@@ -2473,7 +3045,7 @@ bfa_nw_iocpf_sem_timeout(void *ioc_arg)
2473 | static void | 3045 | static void |
2474 | bfa_ioc_poll_fwinit(struct bfa_ioc *ioc) | 3046 | bfa_ioc_poll_fwinit(struct bfa_ioc *ioc) |
2475 | { | 3047 | { |
2476 | u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate); | 3048 | u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); |
2477 | 3049 | ||
2478 | if (fwstate == BFI_IOC_DISABLED) { | 3050 | if (fwstate == BFI_IOC_DISABLED) { |
2479 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); | 3051 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); |
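For readers tracing the new comparison code, the precedence that bfa_ioc_fw_ver_patch_cmp() encodes can be lifted into a standalone sketch. The types below are simplified stand-ins, not the driver's, and the GA predicate is copied as the patch's fwhdr_is_ga() defines it (zero phase and build reads as "not GA"):

#include <stdbool.h>

/* sketch only: stand-ins for struct bfi_ioc_fwver and enum bfi_ioc_img_ver_cmp */
struct fwver { unsigned char major, minor, maint, patch, phase, build; };
enum ver_cmp { VER_INCOMP, VER_OLD, VER_SAME, VER_BETTER };

static bool fwver_is_ga(const struct fwver *v)
{
	/* mirrors fwhdr_is_ga() above: zero phase and build => not GA */
	return !(v->phase == 0 && v->build == 0);
}

/* precedence: patch number first, then GA status, then phase, then build;
 * compatibility (signature, major/minor/maint, MD5) is assumed already checked */
static enum ver_cmp fw_ver_patch_cmp(const struct fwver *base, const struct fwver *cmp)
{
	if (cmp->patch != base->patch)
		return cmp->patch > base->patch ? VER_BETTER : VER_OLD;
	if (fwver_is_ga(base))
		return fwver_is_ga(cmp) ? VER_SAME : VER_OLD;
	if (fwver_is_ga(cmp))
		return VER_BETTER;
	if (cmp->phase != base->phase)
		return cmp->phase > base->phase ? VER_BETTER : VER_OLD;
	if (cmp->build != base->build)
		return cmp->build > base->build ? VER_BETTER : VER_OLD;
	return VER_SAME;
}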
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index f04e0aab25b4..20cff7df4b55 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -215,6 +215,13 @@ struct bfa_ioc_hwif {
215 | void (*ioc_sync_ack) (struct bfa_ioc *ioc); | 215 | void (*ioc_sync_ack) (struct bfa_ioc *ioc); |
216 | bool (*ioc_sync_complete) (struct bfa_ioc *ioc); | 216 | bool (*ioc_sync_complete) (struct bfa_ioc *ioc); |
217 | bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc); | 217 | bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc); |
218 | void (*ioc_set_fwstate) (struct bfa_ioc *ioc, | ||
219 | enum bfi_ioc_state fwstate); | ||
220 | enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc *ioc); | ||
221 | void (*ioc_set_alt_fwstate) (struct bfa_ioc *ioc, | ||
222 | enum bfi_ioc_state fwstate); | ||
223 | enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc *ioc); | ||
224 | |||
218 | }; | 225 | }; |
219 | 226 | ||
220 | #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) | 227 | #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) |
@@ -291,6 +298,7 @@ void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
291 | bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc); | 298 | bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc); |
292 | bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc); | 299 | bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc); |
293 | void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); | 300 | void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); |
301 | enum bfa_status bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc); | ||
294 | void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, | 302 | void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, |
295 | struct bfa_ioc_notify *notify); | 303 | struct bfa_ioc_notify *notify); |
296 | bool bfa_nw_ioc_sem_get(void __iomem *sem_reg); | 304 | bool bfa_nw_ioc_sem_get(void __iomem *sem_reg); |
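The four new hooks make firmware-state access an ASIC decision: common code in bfa_ioc.c now goes through the bfa_ioc_{set,get}_{cur,alt}_ioc_fwstate() macros instead of writel()/readl() on fixed registers, so a part that keeps its firmware state somewhere else can plug in its own accessors. A minimal sketch of the pattern, with toy types rather than the driver's:

#include <stdint.h>

enum fwstate { FWSTATE_UNINIT = 0, FWSTATE_INITING, FWSTATE_OP };

struct ioc;

struct ioc_hwif {
	void (*set_fwstate)(struct ioc *ioc, enum fwstate s);
	enum fwstate (*get_fwstate)(struct ioc *ioc);
};

struct ioc {
	volatile uint32_t *fwstate_reg;	/* stand-in for ioc_regs.ioc_fwstate */
	const struct ioc_hwif *hwif;
};

/* a register-backed pair, as the CT parts supply (the driver uses writel/readl) */
static void ct_set_fwstate(struct ioc *ioc, enum fwstate s)
{
	*ioc->fwstate_reg = (uint32_t)s;
}

static enum fwstate ct_get_fwstate(struct ioc *ioc)
{
	return (enum fwstate)*ioc->fwstate_reg;
}

static const struct ioc_hwif ct_hwif = {
	.set_fwstate = ct_set_fwstate,
	.get_fwstate = ct_get_fwstate,
};

/* generic code sees only the hook, mirroring the new macros in bfa_ioc.c */
static inline enum fwstate ioc_get_cur_fwstate(struct ioc *ioc)
{
	return ioc->hwif->get_fwstate(ioc);
}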
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 5df0b0c68c5a..d639558455cb 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -48,6 +48,12 @@ static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
48 | static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); | 48 | static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); |
49 | static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); | 49 | static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); |
50 | static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); | 50 | static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); |
51 | static void bfa_ioc_ct_set_cur_ioc_fwstate( | ||
52 | struct bfa_ioc *ioc, enum bfi_ioc_state fwstate); | ||
53 | static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc); | ||
54 | static void bfa_ioc_ct_set_alt_ioc_fwstate( | ||
55 | struct bfa_ioc *ioc, enum bfi_ioc_state fwstate); | ||
56 | static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc); | ||
51 | static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, | 57 | static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, |
52 | enum bfi_asic_mode asic_mode); | 58 | enum bfi_asic_mode asic_mode); |
53 | static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb, | 59 | static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb, |
@@ -68,6 +74,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct = {
68 | .ioc_sync_leave = bfa_ioc_ct_sync_leave, | 74 | .ioc_sync_leave = bfa_ioc_ct_sync_leave, |
69 | .ioc_sync_ack = bfa_ioc_ct_sync_ack, | 75 | .ioc_sync_ack = bfa_ioc_ct_sync_ack, |
70 | .ioc_sync_complete = bfa_ioc_ct_sync_complete, | 76 | .ioc_sync_complete = bfa_ioc_ct_sync_complete, |
77 | .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate, | ||
78 | .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate, | ||
79 | .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate, | ||
80 | .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate, | ||
71 | }; | 81 | }; |
72 | 82 | ||
73 | static const struct bfa_ioc_hwif nw_hwif_ct2 = { | 83 | static const struct bfa_ioc_hwif nw_hwif_ct2 = { |
@@ -85,6 +95,10 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = {
85 | .ioc_sync_leave = bfa_ioc_ct_sync_leave, | 95 | .ioc_sync_leave = bfa_ioc_ct_sync_leave, |
86 | .ioc_sync_ack = bfa_ioc_ct_sync_ack, | 96 | .ioc_sync_ack = bfa_ioc_ct_sync_ack, |
87 | .ioc_sync_complete = bfa_ioc_ct_sync_complete, | 97 | .ioc_sync_complete = bfa_ioc_ct_sync_complete, |
98 | .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate, | ||
99 | .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate, | ||
100 | .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate, | ||
101 | .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate, | ||
88 | }; | 102 | }; |
89 | 103 | ||
90 | /* Called from bfa_ioc_attach() to map asic specific calls. */ | 104 | /* Called from bfa_ioc_attach() to map asic specific calls. */ |
@@ -565,6 +579,32 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
565 | return false; | 579 | return false; |
566 | } | 580 | } |
567 | 581 | ||
582 | static void | ||
583 | bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc, | ||
584 | enum bfi_ioc_state fwstate) | ||
585 | { | ||
586 | writel(fwstate, ioc->ioc_regs.ioc_fwstate); | ||
587 | } | ||
588 | |||
589 | static enum bfi_ioc_state | ||
590 | bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc) | ||
591 | { | ||
592 | return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate); | ||
593 | } | ||
594 | |||
595 | static void | ||
596 | bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc, | ||
597 | enum bfi_ioc_state fwstate) | ||
598 | { | ||
599 | writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate); | ||
600 | } | ||
601 | |||
602 | static enum bfi_ioc_state | ||
603 | bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc) | ||
604 | { | ||
605 | return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate); | ||
606 | } | ||
607 | |||
568 | static enum bfa_status | 608 | static enum bfa_status |
569 | bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) | 609 | bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) |
570 | { | 610 | { |
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 1f24c23dc786..8c563a77cdf6 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -25,6 +25,7 @@
25 | /* BFI FW image type */ | 25 | /* BFI FW image type */ |
26 | #define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */ | 26 | #define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */ |
27 | #define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) | 27 | #define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) |
28 | #define BFI_FLASH_IMAGE_SZ 0x100000 | ||
28 | 29 | ||
29 | /* Msg header common to all msgs */ | 30 | /* Msg header common to all msgs */ |
30 | struct bfi_mhdr { | 31 | struct bfi_mhdr { |
@@ -233,7 +234,29 @@ struct bfi_ioc_getattr_reply {
233 | #define BFI_IOC_TRC_HDR_SZ 32 | 234 | #define BFI_IOC_TRC_HDR_SZ 32 |
234 | 235 | ||
235 | #define BFI_IOC_FW_SIGNATURE (0xbfadbfad) | 236 | #define BFI_IOC_FW_SIGNATURE (0xbfadbfad) |
237 | #define BFI_IOC_FW_INV_SIGN (0xdeaddead) | ||
236 | #define BFI_IOC_MD5SUM_SZ 4 | 238 | #define BFI_IOC_MD5SUM_SZ 4 |
239 | |||
240 | struct bfi_ioc_fwver { | ||
241 | #ifdef __BIG_ENDIAN | ||
242 | u8 patch; | ||
243 | u8 maint; | ||
244 | u8 minor; | ||
245 | u8 major; | ||
246 | u8 rsvd[2]; | ||
247 | u8 build; | ||
248 | u8 phase; | ||
249 | #else | ||
250 | u8 major; | ||
251 | u8 minor; | ||
252 | u8 maint; | ||
253 | u8 patch; | ||
254 | u8 phase; | ||
255 | u8 build; | ||
256 | u8 rsvd[2]; | ||
257 | #endif | ||
258 | }; | ||
259 | |||
237 | struct bfi_ioc_image_hdr { | 260 | struct bfi_ioc_image_hdr { |
238 | u32 signature; /*!< constant signature */ | 261 | u32 signature; /*!< constant signature */ |
239 | u8 asic_gen; /*!< asic generation */ | 262 | u8 asic_gen; /*!< asic generation */ |
@@ -242,10 +265,18 @@ struct bfi_ioc_image_hdr {
242 | u8 port1_mode; /*!< device mode for port 1 */ | 265 | u8 port1_mode; /*!< device mode for port 1 */ |
243 | u32 exec; /*!< exec vector */ | 266 | u32 exec; /*!< exec vector */ |
244 | u32 bootenv; /*!< firmware boot env */ | 267 | u32 bootenv; /*!< firmware boot env */ |
245 | u32 rsvd_b[4]; | 268 | u32 rsvd_b[2]; |
269 | struct bfi_ioc_fwver fwver; | ||
246 | u32 md5sum[BFI_IOC_MD5SUM_SZ]; | 270 | u32 md5sum[BFI_IOC_MD5SUM_SZ]; |
247 | }; | 271 | }; |
248 | 272 | ||
273 | enum bfi_ioc_img_ver_cmp { | ||
274 | BFI_IOC_IMG_VER_INCOMP, | ||
275 | BFI_IOC_IMG_VER_OLD, | ||
276 | BFI_IOC_IMG_VER_SAME, | ||
277 | BFI_IOC_IMG_VER_BETTER | ||
278 | }; | ||
279 | |||
249 | #define BFI_FWBOOT_DEVMODE_OFF 4 | 280 | #define BFI_FWBOOT_DEVMODE_OFF 4 |
250 | #define BFI_FWBOOT_TYPE_OFF 8 | 281 | #define BFI_FWBOOT_TYPE_OFF 8 |
251 | #define BFI_FWBOOT_ENV_OFF 12 | 282 | #define BFI_FWBOOT_ENV_OFF 12 |
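The header change carves struct bfi_ioc_fwver out of two of the four reserved words in struct bfi_ioc_image_hdr, with the field order mirrored under __BIG_ENDIAN so each byte keeps its on-flash position. A compile-time size check of that claim (a sketch, not part of the patch; little-endian arm shown):

#include <assert.h>
#include <stdint.h>

struct fwver {
	uint8_t major, minor, maint, patch;
	uint8_t phase, build, rsvd[2];
};

/* rsvd_b[4] became rsvd_b[2] + fwver, so the image header keeps its size
 * and md5sum[] keeps its offset */
static_assert(sizeof(struct fwver) == 2 * sizeof(uint32_t),
	      "fwver must occupy exactly the two reserved words it replaced");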
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index 7d10e335c27d..ae072dc5d238 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -472,7 +472,8 @@ enum bfi_enet_hds_type {
472 | 472 | ||
473 | struct bfi_enet_rx_cfg { | 473 | struct bfi_enet_rx_cfg { |
474 | u8 rxq_type; | 474 | u8 rxq_type; |
475 | u8 rsvd[3]; | 475 | u8 rsvd[1]; |
476 | u16 frame_size; | ||
476 | 477 | ||
477 | struct { | 478 | struct { |
478 | u8 max_header_size; | 479 | u8 max_header_size; |
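Shrinking rsvd[3] to rsvd[1] frees exactly the two bytes the new u16 frame_size occupies, so struct bfi_enet_rx_cfg keeps its size and every following field its offset, and the u16 lands naturally aligned. A compile-time sketch of both claims (not part of the patch):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct rx_cfg_head {
	uint8_t  rxq_type;
	uint8_t  rsvd[1];
	uint16_t frame_size;
};

static_assert(sizeof(struct rx_cfg_head) == 4, "same 4 bytes as rxq_type + rsvd[3]");
static_assert(offsetof(struct rx_cfg_head, frame_size) == 2, "u16 naturally aligned");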
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index f1eafc409bbd..1f512190d696 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -354,6 +354,14 @@ do { \
354 | } \ | 354 | } \ |
355 | } while (0) | 355 | } while (0) |
356 | 356 | ||
357 | #define bna_mcam_mod_free_q(_bna) (&(_bna)->mcam_mod.free_q) | ||
358 | |||
359 | #define bna_mcam_mod_del_q(_bna) (&(_bna)->mcam_mod.del_q) | ||
360 | |||
361 | #define bna_ucam_mod_free_q(_bna) (&(_bna)->ucam_mod.free_q) | ||
362 | |||
363 | #define bna_ucam_mod_del_q(_bna) (&(_bna)->ucam_mod.del_q) | ||
364 | |||
357 | /* Inline functions */ | 365 | /* Inline functions */ |
358 | 366 | ||
359 | static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr) | 367 | static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr) |
@@ -391,12 +399,8 @@ int bna_num_rxp_set(struct bna *bna, int num_rxp);
391 | void bna_hw_stats_get(struct bna *bna); | 399 | void bna_hw_stats_get(struct bna *bna); |
392 | 400 | ||
393 | /* APIs for RxF */ | 401 | /* APIs for RxF */ |
394 | struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod); | 402 | struct bna_mac *bna_cam_mod_mac_get(struct list_head *head); |
395 | void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, | 403 | void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac); |
396 | struct bna_mac *mac); | ||
397 | struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod); | ||
398 | void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, | ||
399 | struct bna_mac *mac); | ||
400 | struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod); | 404 | struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod); |
401 | void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod, | 405 | void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod, |
402 | struct bna_mcam_handle *handle); | 406 | struct bna_mcam_handle *handle); |
@@ -493,11 +497,17 @@ enum bna_cb_status
493 | bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac, | 497 | bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac, |
494 | void (*cbfn)(struct bnad *, struct bna_rx *)); | 498 | void (*cbfn)(struct bnad *, struct bna_rx *)); |
495 | enum bna_cb_status | 499 | enum bna_cb_status |
500 | bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist, | ||
501 | void (*cbfn)(struct bnad *, struct bna_rx *)); | ||
502 | enum bna_cb_status | ||
496 | bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac, | 503 | bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac, |
497 | void (*cbfn)(struct bnad *, struct bna_rx *)); | 504 | void (*cbfn)(struct bnad *, struct bna_rx *)); |
498 | enum bna_cb_status | 505 | enum bna_cb_status |
499 | bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac, | 506 | bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac, |
500 | void (*cbfn)(struct bnad *, struct bna_rx *)); | 507 | void (*cbfn)(struct bnad *, struct bna_rx *)); |
508 | void | ||
509 | bna_rx_mcast_delall(struct bna_rx *rx, | ||
510 | void (*cbfn)(struct bnad *, struct bna_rx *)); | ||
501 | enum bna_cb_status | 511 | enum bna_cb_status |
502 | bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode, | 512 | bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode, |
503 | enum bna_rxmode bitmask, | 513 | enum bna_rxmode bitmask, |
@@ -505,6 +515,8 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
505 | void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id); | 515 | void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id); |
506 | void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id); | 516 | void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id); |
507 | void bna_rx_vlanfilter_enable(struct bna_rx *rx); | 517 | void bna_rx_vlanfilter_enable(struct bna_rx *rx); |
518 | void bna_rx_vlan_strip_enable(struct bna_rx *rx); | ||
519 | void bna_rx_vlan_strip_disable(struct bna_rx *rx); | ||
508 | /* ENET */ | 520 | /* ENET */ |
509 | 521 | ||
510 | /* API for RX */ | 522 | /* API for RX */ |
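The per-module bna_ucam_mod_mac_get()/bna_mcam_mod_mac_put() pairs collapse into one list-based pair, and the new *_free_q/*_del_q accessors choose which list a CAM entry moves through. A hedged usage sketch (hypothetical helpers, not from the patch; driver types assumed from bna.h):

#include "bna.h"	/* struct bna, struct bna_mac, the helpers above */

/* hypothetical helper, not part of the patch */
static struct bna_mac *example_claim_ucast(struct bna *bna)
{
	struct bna_mac *mac = bna_cam_mod_mac_get(bna_ucam_mod_free_q(bna));

	if (mac == NULL)	/* unicast CAM exhausted */
		return NULL;
	bfa_q_qe_init(&mac->qe);
	return mac;
}

/* on removal, park the entry on del_q for the synchronous list-set paths
 * (e.g. the new bna_rx_ucast_listset()) instead of returning it to free_q */
static void example_park_for_delete(struct bna *bna, struct bna_mac *mac)
{
	bna_cam_mod_mac_put(bna_ucam_mod_del_q(bna), mac);
}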
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 3ca77fad4851..13f9636cdba7 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1811,6 +1811,13 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
1811 | list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q); | 1811 | list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q); |
1812 | } | 1812 | } |
1813 | 1813 | ||
1814 | /* A separate queue to allow synchronous setting of a list of MACs */ | ||
1815 | INIT_LIST_HEAD(&ucam_mod->del_q); | ||
1816 | for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) { | ||
1817 | bfa_q_qe_init(&ucam_mod->ucmac[i].qe); | ||
1818 | list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q); | ||
1819 | } | ||
1820 | |||
1814 | ucam_mod->bna = bna; | 1821 | ucam_mod->bna = bna; |
1815 | } | 1822 | } |
1816 | 1823 | ||
@@ -1818,11 +1825,16 @@ static void
1818 | bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod) | 1825 | bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod) |
1819 | { | 1826 | { |
1820 | struct list_head *qe; | 1827 | struct list_head *qe; |
1821 | int i = 0; | 1828 | int i; |
1822 | 1829 | ||
1830 | i = 0; | ||
1823 | list_for_each(qe, &ucam_mod->free_q) | 1831 | list_for_each(qe, &ucam_mod->free_q) |
1824 | i++; | 1832 | i++; |
1825 | 1833 | ||
1834 | i = 0; | ||
1835 | list_for_each(qe, &ucam_mod->del_q) | ||
1836 | i++; | ||
1837 | |||
1826 | ucam_mod->bna = NULL; | 1838 | ucam_mod->bna = NULL; |
1827 | } | 1839 | } |
1828 | 1840 | ||
@@ -1851,6 +1863,13 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
1851 | &mcam_mod->free_handle_q); | 1863 | &mcam_mod->free_handle_q); |
1852 | } | 1864 | } |
1853 | 1865 | ||
1866 | /* A separate queue to allow synchronous setting of a list of MACs */ | ||
1867 | INIT_LIST_HEAD(&mcam_mod->del_q); | ||
1868 | for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) { | ||
1869 | bfa_q_qe_init(&mcam_mod->mcmac[i].qe); | ||
1870 | list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q); | ||
1871 | } | ||
1872 | |||
1854 | mcam_mod->bna = bna; | 1873 | mcam_mod->bna = bna; |
1855 | } | 1874 | } |
1856 | 1875 | ||
@@ -1864,6 +1883,9 @@ bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
1864 | list_for_each(qe, &mcam_mod->free_q) i++; | 1883 | list_for_each(qe, &mcam_mod->free_q) i++; |
1865 | 1884 | ||
1866 | i = 0; | 1885 | i = 0; |
1886 | list_for_each(qe, &mcam_mod->del_q) i++; | ||
1887 | |||
1888 | i = 0; | ||
1867 | list_for_each(qe, &mcam_mod->free_handle_q) i++; | 1889 | list_for_each(qe, &mcam_mod->free_handle_q) i++; |
1868 | 1890 | ||
1869 | mcam_mod->bna = NULL; | 1891 | mcam_mod->bna = NULL; |
@@ -1976,7 +1998,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
1976 | BNA_MEM_T_KVA; | 1998 | BNA_MEM_T_KVA; |
1977 | res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1; | 1999 | res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1; |
1978 | res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len = | 2000 | res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len = |
1979 | attr->num_ucmac * sizeof(struct bna_mac); | 2001 | (attr->num_ucmac * 2) * sizeof(struct bna_mac); |
1980 | 2002 | ||
1981 | /* Virtual memory for Multicast MAC address - stored by mcam module */ | 2003 | /* Virtual memory for Multicast MAC address - stored by mcam module */ |
1982 | res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM; | 2004 | res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM; |
@@ -1984,7 +2006,7 @@ bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
1984 | BNA_MEM_T_KVA; | 2006 | BNA_MEM_T_KVA; |
1985 | res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1; | 2007 | res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1; |
1986 | res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len = | 2008 | res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len = |
1987 | attr->num_mcmac * sizeof(struct bna_mac); | 2009 | (attr->num_mcmac * 2) * sizeof(struct bna_mac); |
1988 | 2010 | ||
1989 | /* Virtual memory for Multicast handle - stored by mcam module */ | 2011 | /* Virtual memory for Multicast handle - stored by mcam module */ |
1990 | res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM; | 2012 | res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM; |
@@ -2080,41 +2102,21 @@ bna_num_rxp_set(struct bna *bna, int num_rxp)
2080 | } | 2102 | } |
2081 | 2103 | ||
2082 | struct bna_mac * | 2104 | struct bna_mac * |
2083 | bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod) | 2105 | bna_cam_mod_mac_get(struct list_head *head) |
2084 | { | ||
2085 | struct list_head *qe; | ||
2086 | |||
2087 | if (list_empty(&ucam_mod->free_q)) | ||
2088 | return NULL; | ||
2089 | |||
2090 | bfa_q_deq(&ucam_mod->free_q, &qe); | ||
2091 | |||
2092 | return (struct bna_mac *)qe; | ||
2093 | } | ||
2094 | |||
2095 | void | ||
2096 | bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac) | ||
2097 | { | ||
2098 | list_add_tail(&mac->qe, &ucam_mod->free_q); | ||
2099 | } | ||
2100 | |||
2101 | struct bna_mac * | ||
2102 | bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod) | ||
2103 | { | 2106 | { |
2104 | struct list_head *qe; | 2107 | struct list_head *qe; |
2105 | 2108 | ||
2106 | if (list_empty(&mcam_mod->free_q)) | 2109 | if (list_empty(head)) |
2107 | return NULL; | 2110 | return NULL; |
2108 | 2111 | ||
2109 | bfa_q_deq(&mcam_mod->free_q, &qe); | 2112 | bfa_q_deq(head, &qe); |
2110 | |||
2111 | return (struct bna_mac *)qe; | 2113 | return (struct bna_mac *)qe; |
2112 | } | 2114 | } |
2113 | 2115 | ||
2114 | void | 2116 | void |
2115 | bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac) | 2117 | bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac) |
2116 | { | 2118 | { |
2117 | list_add_tail(&mac->qe, &mcam_mod->free_q); | 2119 | list_add_tail(&mac->qe, tail); |
2118 | } | 2120 | } |
2119 | 2121 | ||
2120 | struct bna_mcam_handle * | 2122 | struct bna_mcam_handle * |
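The "for (i = i; ...)" loops in bna_ucam_mod_init()/bna_mcam_mod_init() above simply continue the counter from the first seeding loop: a single array of 2 * num entries now backs two lists, the first half on free_q and the second half on del_q, which is also why bna_mod_res_req() doubles both array allocations. The pattern in isolation (a sketch, with driver types assumed from bna.h):

#include <linux/list.h>
#include "bna.h"	/* struct bna_mac */

static void seed_two_lists(struct list_head *free_q, struct list_head *del_q,
			   struct bna_mac *macs, int num)
{
	int i;

	INIT_LIST_HEAD(free_q);
	for (i = 0; i < num; i++)		/* first half backs free_q */
		list_add_tail(&macs[i].qe, free_q);

	INIT_LIST_HEAD(del_q);
	for (; i < num * 2; i++)		/* i continues at num */
		list_add_tail(&macs[i].qe, del_q);
}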
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index af3f7bb0b3b8..2702d02e98d9 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -322,6 +322,10 @@ do { \
322 | #define BNA_CQ_EF_REMOTE (1 << 19) | 322 | #define BNA_CQ_EF_REMOTE (1 << 19) |
323 | 323 | ||
324 | #define BNA_CQ_EF_LOCAL (1 << 20) | 324 | #define BNA_CQ_EF_LOCAL (1 << 20) |
325 | /* CAT2 ASIC does not use bit 21 as per the SPEC. | ||
326 | * Bit 31 is set in every end-of-frame completion. ||
327 | */ | ||
328 | #define BNA_CQ_EF_EOP (1 << 31) | ||
325 | 329 | ||
326 | /* Data structures */ | 330 | /* Data structures */ |
327 | 331 | ||
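Since bit 31 is now set on every end-of-frame completion, a receive-path consumer can spot the last completion of a frame from the flag word alone. A sketch of the test (hypothetical helper, not from the patch; BNA_CQ_EF_EOP as defined above):

#include <linux/types.h>
#include "bna_hw_defs.h"

/* hypothetical helper, not part of the patch */
static inline bool bna_cq_entry_is_eop(u32 flags)
{
	return (flags & BNA_CQ_EF_EOP) != 0;
}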
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 3c07064b2bc4..85e63546abe3 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -529,13 +529,13 @@ bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
529 | struct list_head *qe; | 529 | struct list_head *qe; |
530 | int ret; | 530 | int ret; |
531 | 531 | ||
532 | /* Delete multicast entries previousely added */ | 532 | /* First delete multicast entries to maintain the count */ |
533 | while (!list_empty(&rxf->mcast_pending_del_q)) { | 533 | while (!list_empty(&rxf->mcast_pending_del_q)) { |
534 | bfa_q_deq(&rxf->mcast_pending_del_q, &qe); | 534 | bfa_q_deq(&rxf->mcast_pending_del_q, &qe); |
535 | bfa_q_qe_init(qe); | 535 | bfa_q_qe_init(qe); |
536 | mac = (struct bna_mac *)qe; | 536 | mac = (struct bna_mac *)qe; |
537 | ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP); | 537 | ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP); |
538 | bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); | 538 | bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac); |
539 | if (ret) | 539 | if (ret) |
540 | return ret; | 540 | return ret; |
541 | } | 541 | } |
@@ -586,7 +586,7 @@ bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) | |||
586 | bfa_q_qe_init(qe); | 586 | bfa_q_qe_init(qe); |
587 | mac = (struct bna_mac *)qe; | 587 | mac = (struct bna_mac *)qe; |
588 | ret = bna_rxf_mcast_del(rxf, mac, cleanup); | 588 | ret = bna_rxf_mcast_del(rxf, mac, cleanup); |
589 | bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); | 589 | bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac); |
590 | if (ret) | 590 | if (ret) |
591 | return ret; | 591 | return ret; |
592 | } | 592 | } |
@@ -796,20 +796,20 @@ bna_rxf_uninit(struct bna_rxf *rxf) | |||
796 | while (!list_empty(&rxf->ucast_pending_add_q)) { | 796 | while (!list_empty(&rxf->ucast_pending_add_q)) { |
797 | bfa_q_deq(&rxf->ucast_pending_add_q, &mac); | 797 | bfa_q_deq(&rxf->ucast_pending_add_q, &mac); |
798 | bfa_q_qe_init(&mac->qe); | 798 | bfa_q_qe_init(&mac->qe); |
799 | bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); | 799 | bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac); |
800 | } | 800 | } |
801 | 801 | ||
802 | if (rxf->ucast_pending_mac) { | 802 | if (rxf->ucast_pending_mac) { |
803 | bfa_q_qe_init(&rxf->ucast_pending_mac->qe); | 803 | bfa_q_qe_init(&rxf->ucast_pending_mac->qe); |
804 | bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, | 804 | bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), |
805 | rxf->ucast_pending_mac); | 805 | rxf->ucast_pending_mac); |
806 | rxf->ucast_pending_mac = NULL; | 806 | rxf->ucast_pending_mac = NULL; |
807 | } | 807 | } |
808 | 808 | ||
809 | while (!list_empty(&rxf->mcast_pending_add_q)) { | 809 | while (!list_empty(&rxf->mcast_pending_add_q)) { |
810 | bfa_q_deq(&rxf->mcast_pending_add_q, &mac); | 810 | bfa_q_deq(&rxf->mcast_pending_add_q, &mac); |
811 | bfa_q_qe_init(&mac->qe); | 811 | bfa_q_qe_init(&mac->qe); |
812 | bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); | 812 | bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac); |
813 | } | 813 | } |
814 | 814 | ||
815 | rxf->rxmode_pending = 0; | 815 | rxf->rxmode_pending = 0; |
@@ -869,7 +869,7 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac, | |||
869 | 869 | ||
870 | if (rxf->ucast_pending_mac == NULL) { | 870 | if (rxf->ucast_pending_mac == NULL) { |
871 | rxf->ucast_pending_mac = | 871 | rxf->ucast_pending_mac = |
872 | bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod); | 872 | bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna)); |
873 | if (rxf->ucast_pending_mac == NULL) | 873 | if (rxf->ucast_pending_mac == NULL) |
874 | return BNA_CB_UCAST_CAM_FULL; | 874 | return BNA_CB_UCAST_CAM_FULL; |
875 | bfa_q_qe_init(&rxf->ucast_pending_mac->qe); | 875 | bfa_q_qe_init(&rxf->ucast_pending_mac->qe); |
@@ -900,7 +900,7 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr, | |||
900 | return BNA_CB_SUCCESS; | 900 | return BNA_CB_SUCCESS; |
901 | } | 901 | } |
902 | 902 | ||
903 | mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod); | 903 | mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna)); |
904 | if (mac == NULL) | 904 | if (mac == NULL) |
905 | return BNA_CB_MCAST_LIST_FULL; | 905 | return BNA_CB_MCAST_LIST_FULL; |
906 | bfa_q_qe_init(&mac->qe); | 906 | bfa_q_qe_init(&mac->qe); |
@@ -916,35 +916,92 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr, | |||
916 | } | 916 | } |
917 | 917 | ||
918 | enum bna_cb_status | 918 | enum bna_cb_status |
919 | bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist, | 919 | bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist, |
920 | void (*cbfn)(struct bnad *, struct bna_rx *)) | 920 | void (*cbfn)(struct bnad *, struct bna_rx *)) |
921 | { | 921 | { |
922 | struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod; | ||
922 | struct bna_rxf *rxf = &rx->rxf; | 923 | struct bna_rxf *rxf = &rx->rxf; |
923 | struct list_head list_head; | 924 | struct list_head list_head; |
924 | struct list_head *qe; | 925 | struct list_head *qe; |
925 | u8 *mcaddr; | 926 | u8 *mcaddr; |
926 | struct bna_mac *mac; | 927 | struct bna_mac *mac, *del_mac; |
927 | int i; | 928 | int i; |
928 | 929 | ||
930 | /* Purge the pending_add_q */ | ||
931 | while (!list_empty(&rxf->ucast_pending_add_q)) { | ||
932 | bfa_q_deq(&rxf->ucast_pending_add_q, &qe); | ||
933 | bfa_q_qe_init(qe); | ||
934 | mac = (struct bna_mac *)qe; | ||
935 | bna_cam_mod_mac_put(&ucam_mod->free_q, mac); | ||
936 | } | ||
937 | |||
938 | /* Schedule active_q entries for deletion */ | ||
939 | while (!list_empty(&rxf->ucast_active_q)) { | ||
940 | bfa_q_deq(&rxf->ucast_active_q, &qe); | ||
941 | mac = (struct bna_mac *)qe; | ||
942 | bfa_q_qe_init(&mac->qe); | ||
943 | |||
944 | del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q); | ||
945 | memcpy(del_mac, mac, sizeof(*del_mac)); | ||
946 | list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q); | ||
947 | bna_cam_mod_mac_put(&ucam_mod->free_q, mac); | ||
948 | } | ||
949 | |||
929 | /* Allocate nodes */ | 950 | /* Allocate nodes */ |
930 | INIT_LIST_HEAD(&list_head); | 951 | INIT_LIST_HEAD(&list_head); |
931 | for (i = 0, mcaddr = mclist; i < count; i++) { | 952 | for (i = 0, mcaddr = uclist; i < count; i++) { |
932 | mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod); | 953 | mac = bna_cam_mod_mac_get(&ucam_mod->free_q); |
933 | if (mac == NULL) | 954 | if (mac == NULL) |
934 | goto err_return; | 955 | goto err_return; |
935 | bfa_q_qe_init(&mac->qe); | 956 | bfa_q_qe_init(&mac->qe); |
936 | memcpy(mac->addr, mcaddr, ETH_ALEN); | 957 | memcpy(mac->addr, mcaddr, ETH_ALEN); |
937 | list_add_tail(&mac->qe, &list_head); | 958 | list_add_tail(&mac->qe, &list_head); |
938 | |||
939 | mcaddr += ETH_ALEN; | 959 | mcaddr += ETH_ALEN; |
940 | } | 960 | } |
941 | 961 | ||
962 | /* Add the new entries */ | ||
963 | while (!list_empty(&list_head)) { | ||
964 | bfa_q_deq(&list_head, &qe); | ||
965 | mac = (struct bna_mac *)qe; | ||
966 | bfa_q_qe_init(&mac->qe); | ||
967 | list_add_tail(&mac->qe, &rxf->ucast_pending_add_q); | ||
968 | } | ||
969 | |||
970 | rxf->cam_fltr_cbfn = cbfn; | ||
971 | rxf->cam_fltr_cbarg = rx->bna->bnad; | ||
972 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); | ||
973 | |||
974 | return BNA_CB_SUCCESS; | ||
975 | |||
976 | err_return: | ||
977 | while (!list_empty(&list_head)) { | ||
978 | bfa_q_deq(&list_head, &qe); | ||
979 | mac = (struct bna_mac *)qe; | ||
980 | bfa_q_qe_init(&mac->qe); | ||
981 | bna_cam_mod_mac_put(&ucam_mod->free_q, mac); | ||
982 | } | ||
983 | |||
984 | return BNA_CB_UCAST_CAM_FULL; | ||
985 | } | ||
986 | |||
987 | enum bna_cb_status | ||
988 | bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist, | ||
989 | void (*cbfn)(struct bnad *, struct bna_rx *)) | ||
990 | { | ||
991 | struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod; | ||
992 | struct bna_rxf *rxf = &rx->rxf; | ||
993 | struct list_head list_head; | ||
994 | struct list_head *qe; | ||
995 | u8 *mcaddr; | ||
996 | struct bna_mac *mac, *del_mac; | ||
997 | int i; | ||
998 | |||
942 | /* Purge the pending_add_q */ | 999 | /* Purge the pending_add_q */ |
943 | while (!list_empty(&rxf->mcast_pending_add_q)) { | 1000 | while (!list_empty(&rxf->mcast_pending_add_q)) { |
944 | bfa_q_deq(&rxf->mcast_pending_add_q, &qe); | 1001 | bfa_q_deq(&rxf->mcast_pending_add_q, &qe); |
945 | bfa_q_qe_init(qe); | 1002 | bfa_q_qe_init(qe); |
946 | mac = (struct bna_mac *)qe; | 1003 | mac = (struct bna_mac *)qe; |
947 | bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); | 1004 | bna_cam_mod_mac_put(&mcam_mod->free_q, mac); |
948 | } | 1005 | } |
949 | 1006 | ||
950 | /* Schedule active_q entries for deletion */ | 1007 | /* Schedule active_q entries for deletion */ |
@@ -952,7 +1009,26 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist, | |||
952 | bfa_q_deq(&rxf->mcast_active_q, &qe); | 1009 | bfa_q_deq(&rxf->mcast_active_q, &qe); |
953 | mac = (struct bna_mac *)qe; | 1010 | mac = (struct bna_mac *)qe; |
954 | bfa_q_qe_init(&mac->qe); | 1011 | bfa_q_qe_init(&mac->qe); |
955 | list_add_tail(&mac->qe, &rxf->mcast_pending_del_q); | 1012 | |
1013 | del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q); | ||
1014 | |||
1015 | memcpy(del_mac, mac, sizeof(*del_mac)); | ||
1016 | list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q); | ||
1017 | mac->handle = NULL; | ||
1018 | bna_cam_mod_mac_put(&mcam_mod->free_q, mac); | ||
1019 | } | ||
1020 | |||
1021 | /* Allocate nodes */ | ||
1022 | INIT_LIST_HEAD(&list_head); | ||
1023 | for (i = 0, mcaddr = mclist; i < count; i++) { | ||
1024 | mac = bna_cam_mod_mac_get(&mcam_mod->free_q); | ||
1025 | if (mac == NULL) | ||
1026 | goto err_return; | ||
1027 | bfa_q_qe_init(&mac->qe); | ||
1028 | memcpy(mac->addr, mcaddr, ETH_ALEN); | ||
1029 | list_add_tail(&mac->qe, &list_head); | ||
1030 | |||
1031 | mcaddr += ETH_ALEN; | ||
956 | } | 1032 | } |
957 | 1033 | ||
958 | /* Add the new entries */ | 1034 | /* Add the new entries */ |
@@ -974,13 +1050,56 @@ err_return: | |||
974 | bfa_q_deq(&list_head, &qe); | 1050 | bfa_q_deq(&list_head, &qe); |
975 | mac = (struct bna_mac *)qe; | 1051 | mac = (struct bna_mac *)qe; |
976 | bfa_q_qe_init(&mac->qe); | 1052 | bfa_q_qe_init(&mac->qe); |
977 | bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); | 1053 | bna_cam_mod_mac_put(&mcam_mod->free_q, mac); |
978 | } | 1054 | } |
979 | 1055 | ||
980 | return BNA_CB_MCAST_LIST_FULL; | 1056 | return BNA_CB_MCAST_LIST_FULL; |
981 | } | 1057 | } |
982 | 1058 | ||
983 | void | 1059 | void |
1060 | bna_rx_mcast_delall(struct bna_rx *rx, | ||
1061 | void (*cbfn)(struct bnad *, struct bna_rx *)) | ||
1062 | { | ||
1063 | struct bna_rxf *rxf = &rx->rxf; | ||
1064 | struct list_head *qe; | ||
1065 | struct bna_mac *mac, *del_mac; | ||
1066 | int need_hw_config = 0; | ||
1067 | |||
1068 | /* Purge all entries from pending_add_q */ | ||
1069 | while (!list_empty(&rxf->mcast_pending_add_q)) { | ||
1070 | bfa_q_deq(&rxf->mcast_pending_add_q, &qe); | ||
1071 | mac = (struct bna_mac *)qe; | ||
1072 | bfa_q_qe_init(&mac->qe); | ||
1073 | bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac); | ||
1074 | } | ||
1075 | |||
1076 | /* Schedule all entries in active_q for deletion */ | ||
1077 | while (!list_empty(&rxf->mcast_active_q)) { | ||
1078 | bfa_q_deq(&rxf->mcast_active_q, &qe); | ||
1079 | mac = (struct bna_mac *)qe; | ||
1080 | bfa_q_qe_init(&mac->qe); | ||
1081 | |||
1082 | del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna)); | ||
1083 | |||
1084 | memcpy(del_mac, mac, sizeof(*del_mac)); | ||
1085 | list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q); | ||
1086 | mac->handle = NULL; | ||
1087 | bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac); | ||
1088 | need_hw_config = 1; | ||
1089 | } | ||
1090 | |||
1091 | if (need_hw_config) { | ||
1092 | rxf->cam_fltr_cbfn = cbfn; | ||
1093 | rxf->cam_fltr_cbarg = rx->bna->bnad; | ||
1094 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); | ||
1095 | return; | ||
1096 | } | ||
1097 | |||
1098 | if (cbfn) | ||
1099 | (*cbfn)(rx->bna->bnad, rx); | ||
1100 | } | ||
1101 | |||
1102 | void | ||
984 | bna_rx_vlan_add(struct bna_rx *rx, int vlan_id) | 1103 | bna_rx_vlan_add(struct bna_rx *rx, int vlan_id) |
985 | { | 1104 | { |
986 | struct bna_rxf *rxf = &rx->rxf; | 1105 | struct bna_rxf *rxf = &rx->rxf; |
@@ -1022,7 +1141,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf) | |||
1022 | bfa_q_qe_init(qe); | 1141 | bfa_q_qe_init(qe); |
1023 | mac = (struct bna_mac *)qe; | 1142 | mac = (struct bna_mac *)qe; |
1024 | bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ); | 1143 | bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ); |
1025 | bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); | 1144 | bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac); |
1026 | return 1; | 1145 | return 1; |
1027 | } | 1146 | } |
1028 | 1147 | ||
@@ -1062,11 +1181,13 @@ bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) | |||
1062 | bfa_q_qe_init(qe); | 1181 | bfa_q_qe_init(qe); |
1063 | mac = (struct bna_mac *)qe; | 1182 | mac = (struct bna_mac *)qe; |
1064 | if (cleanup == BNA_SOFT_CLEANUP) | 1183 | if (cleanup == BNA_SOFT_CLEANUP) |
1065 | bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); | 1184 | bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), |
1185 | mac); | ||
1066 | else { | 1186 | else { |
1067 | bna_bfi_ucast_req(rxf, mac, | 1187 | bna_bfi_ucast_req(rxf, mac, |
1068 | BFI_ENET_H2I_MAC_UCAST_DEL_REQ); | 1188 | BFI_ENET_H2I_MAC_UCAST_DEL_REQ); |
1069 | bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); | 1189 | bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), |
1190 | mac); | ||
1070 | return 1; | 1191 | return 1; |
1071 | } | 1192 | } |
1072 | } | 1193 | } |
@@ -1690,6 +1811,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx) | |||
1690 | cfg_req->mh.num_entries = htons( | 1811 | cfg_req->mh.num_entries = htons( |
1691 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req))); | 1812 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req))); |
1692 | 1813 | ||
1814 | cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet); | ||
1693 | cfg_req->num_queue_sets = rx->num_paths; | 1815 | cfg_req->num_queue_sets = rx->num_paths; |
1694 | for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q); | 1816 | for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q); |
1695 | i < rx->num_paths; | 1817 | i < rx->num_paths; |
@@ -1711,8 +1833,17 @@ bna_bfi_rx_enet_start(struct bna_rx *rx) | |||
1711 | /* Large/Single RxQ */ | 1833 | /* Large/Single RxQ */ |
1712 | bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q, | 1834 | bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q, |
1713 | &q0->qpt); | 1835 | &q0->qpt); |
1714 | q0->buffer_size = | 1836 | if (q0->multi_buffer) |
1715 | bna_enet_mtu_get(&rx->bna->enet); | 1837 | /* multi-buffer is enabled by allocating |
1838 | * a new rx with a new set of resources. | ||
1839 | * q0->buffer_size should be initialized to | ||
1840 | * the fragment size. | ||
1841 | */ | ||
1842 | cfg_req->rx_cfg.multi_buffer = | ||
1843 | BNA_STATUS_T_ENABLED; | ||
1844 | else | ||
1845 | q0->buffer_size = | ||
1846 | bna_enet_mtu_get(&rx->bna->enet); | ||
1716 | cfg_req->q_cfg[i].ql.rx_buffer_size = | 1847 | cfg_req->q_cfg[i].ql.rx_buffer_size = |
1717 | htons((u16)q0->buffer_size); | 1848 | htons((u16)q0->buffer_size); |
1718 | break; | 1849 | break; |
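With multi_buffer enabled, q0->buffer_size stays at the fragment size and the adapter is informed through cfg_req->rx_cfg.multi_buffer, so one received frame spans ceil(frame_size / buffer_size) completion vectors. A quick arithmetic check, assuming the frame-size formula visible in the bnad_open() hunk below (mtu plus 22 bytes of Ethernet/VLAN/FCS overhead):

    #include <stdio.h>

    int main(void)
    {
        unsigned int frame_size = 9022; /* MTU 9000 plus 22 overhead */
        unsigned int buf_size = 2048;   /* q0_buf_size fragment size */
        unsigned int vecs = (frame_size + buf_size - 1) / buf_size;

        printf("%u fragments per frame\n", vecs); /* 5 */
        return 0;
    }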
@@ -2262,8 +2393,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info) | |||
2262 | u32 hq_depth; | 2393 | u32 hq_depth; |
2263 | u32 dq_depth; | 2394 | u32 dq_depth; |
2264 | 2395 | ||
2265 | dq_depth = q_cfg->q_depth; | 2396 | dq_depth = q_cfg->q0_depth; |
2266 | hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth); | 2397 | hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth); |
2267 | cq_depth = dq_depth + hq_depth; | 2398 | cq_depth = dq_depth + hq_depth; |
2268 | 2399 | ||
2269 | BNA_TO_POWER_OF_2_HIGH(cq_depth); | 2400 | BNA_TO_POWER_OF_2_HIGH(cq_depth); |
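cq_depth is then rounded to a power of two before sizing the CQ. The body of BNA_TO_POWER_OF_2_HIGH is not part of this diff; the helper below assumes it rounds up to the next power of two, which matches how it is used here and again in bna_rx_create() below:

    #include <stdio.h>

    /* presumed behaviour of BNA_TO_POWER_OF_2_HIGH */
    static unsigned int pow2_high(unsigned int v)
    {
        unsigned int p = 1;

        while (p < v)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        /* multi-buffer example: q0_depth 2048 plus q1_depth 512 */
        printf("%u\n", pow2_high(2048 + 512)); /* 4096 */
        return 0;
    }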
@@ -2380,10 +2511,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, | |||
2380 | struct bna_rxq *q0; | 2511 | struct bna_rxq *q0; |
2381 | struct bna_rxq *q1; | 2512 | struct bna_rxq *q1; |
2382 | struct bna_intr_info *intr_info; | 2513 | struct bna_intr_info *intr_info; |
2383 | u32 page_count; | 2514 | struct bna_mem_descr *hqunmap_mem; |
2515 | struct bna_mem_descr *dqunmap_mem; | ||
2384 | struct bna_mem_descr *ccb_mem; | 2516 | struct bna_mem_descr *ccb_mem; |
2385 | struct bna_mem_descr *rcb_mem; | 2517 | struct bna_mem_descr *rcb_mem; |
2386 | struct bna_mem_descr *unmapq_mem; | ||
2387 | struct bna_mem_descr *cqpt_mem; | 2518 | struct bna_mem_descr *cqpt_mem; |
2388 | struct bna_mem_descr *cswqpt_mem; | 2519 | struct bna_mem_descr *cswqpt_mem; |
2389 | struct bna_mem_descr *cpage_mem; | 2520 | struct bna_mem_descr *cpage_mem; |
@@ -2393,8 +2524,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, | |||
2393 | struct bna_mem_descr *dsqpt_mem; | 2524 | struct bna_mem_descr *dsqpt_mem; |
2394 | struct bna_mem_descr *hpage_mem; | 2525 | struct bna_mem_descr *hpage_mem; |
2395 | struct bna_mem_descr *dpage_mem; | 2526 | struct bna_mem_descr *dpage_mem; |
2396 | int i; | 2527 | u32 dpage_count, hpage_count; |
2397 | int dpage_count, hpage_count, rcb_idx; | 2528 | u32 hq_idx, dq_idx, rcb_idx; |
2529 | u32 cq_depth, i; | ||
2530 | u32 page_count; | ||
2398 | 2531 | ||
2399 | if (!bna_rx_res_check(rx_mod, rx_cfg)) | 2532 | if (!bna_rx_res_check(rx_mod, rx_cfg)) |
2400 | return NULL; | 2533 | return NULL; |
@@ -2402,7 +2535,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, | |||
2402 | intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; | 2535 | intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; |
2403 | ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0]; | 2536 | ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0]; |
2404 | rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0]; | 2537 | rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0]; |
2405 | unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0]; | 2538 | dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0]; |
2539 | hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0]; | ||
2406 | cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0]; | 2540 | cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0]; |
2407 | cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0]; | 2541 | cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0]; |
2408 | cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0]; | 2542 | cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0]; |
@@ -2454,7 +2588,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, | |||
2454 | } | 2588 | } |
2455 | 2589 | ||
2456 | rx->num_paths = rx_cfg->num_paths; | 2590 | rx->num_paths = rx_cfg->num_paths; |
2457 | for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) { | 2591 | for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0; |
2592 | i < rx->num_paths; i++) { | ||
2458 | rxp = bna_rxp_get(rx_mod); | 2593 | rxp = bna_rxp_get(rx_mod); |
2459 | list_add_tail(&rxp->qe, &rx->rxp_q); | 2594 | list_add_tail(&rxp->qe, &rx->rxp_q); |
2460 | rxp->type = rx_cfg->rxp_type; | 2595 | rxp->type = rx_cfg->rxp_type; |
@@ -2497,9 +2632,13 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, | |||
2497 | q0->rxp = rxp; | 2632 | q0->rxp = rxp; |
2498 | 2633 | ||
2499 | q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; | 2634 | q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; |
2500 | q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva; | 2635 | q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva; |
2501 | rcb_idx++; | 2636 | rcb_idx++; dq_idx++; |
2502 | q0->rcb->q_depth = rx_cfg->q_depth; | 2637 | q0->rcb->q_depth = rx_cfg->q0_depth; |
2638 | q0->q_depth = rx_cfg->q0_depth; | ||
2639 | q0->multi_buffer = rx_cfg->q0_multi_buf; | ||
2640 | q0->buffer_size = rx_cfg->q0_buf_size; | ||
2641 | q0->num_vecs = rx_cfg->q0_num_vecs; | ||
2503 | q0->rcb->rxq = q0; | 2642 | q0->rcb->rxq = q0; |
2504 | q0->rcb->bnad = bna->bnad; | 2643 | q0->rcb->bnad = bna->bnad; |
2505 | q0->rcb->id = 0; | 2644 | q0->rcb->id = 0; |
@@ -2519,15 +2658,18 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, | |||
2519 | q1->rxp = rxp; | 2658 | q1->rxp = rxp; |
2520 | 2659 | ||
2521 | q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; | 2660 | q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; |
2522 | q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva; | 2661 | q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva; |
2523 | rcb_idx++; | 2662 | rcb_idx++; hq_idx++; |
2524 | q1->rcb->q_depth = rx_cfg->q_depth; | 2663 | q1->rcb->q_depth = rx_cfg->q1_depth; |
2664 | q1->q_depth = rx_cfg->q1_depth; | ||
2665 | q1->multi_buffer = BNA_STATUS_T_DISABLED; | ||
2666 | q1->num_vecs = 1; | ||
2525 | q1->rcb->rxq = q1; | 2667 | q1->rcb->rxq = q1; |
2526 | q1->rcb->bnad = bna->bnad; | 2668 | q1->rcb->bnad = bna->bnad; |
2527 | q1->rcb->id = 1; | 2669 | q1->rcb->id = 1; |
2528 | q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ? | 2670 | q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ? |
2529 | rx_cfg->hds_config.forced_offset | 2671 | rx_cfg->hds_config.forced_offset |
2530 | : rx_cfg->small_buff_size; | 2672 | : rx_cfg->q1_buf_size; |
2531 | q1->rx_packets = q1->rx_bytes = 0; | 2673 | q1->rx_packets = q1->rx_bytes = 0; |
2532 | q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; | 2674 | q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; |
2533 | 2675 | ||
@@ -2542,9 +2684,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad, | |||
2542 | /* Setup CQ */ | 2684 | /* Setup CQ */ |
2543 | 2685 | ||
2544 | rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva; | 2686 | rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva; |
2545 | rxp->cq.ccb->q_depth = rx_cfg->q_depth + | 2687 | cq_depth = rx_cfg->q0_depth + |
2546 | ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? | 2688 | ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? |
2547 | 0 : rx_cfg->q_depth); | 2689 | 0 : rx_cfg->q1_depth); |
2690 | /* if multi-buffer is enabled, the sum of q0_depth | ||
2691 | * and q1_depth need not be a power of 2 | ||
2692 | */ | ||
2693 | BNA_TO_POWER_OF_2_HIGH(cq_depth); | ||
2694 | rxp->cq.ccb->q_depth = cq_depth; | ||
2548 | rxp->cq.ccb->cq = &rxp->cq; | 2695 | rxp->cq.ccb->cq = &rxp->cq; |
2549 | rxp->cq.ccb->rcb[0] = q0->rcb; | 2696 | rxp->cq.ccb->rcb[0] = q0->rcb; |
2550 | q0->rcb->ccb = rxp->cq.ccb; | 2697 | q0->rcb->ccb = rxp->cq.ccb; |
@@ -2670,6 +2817,30 @@ bna_rx_cleanup_complete(struct bna_rx *rx) | |||
2670 | bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE); | 2817 | bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE); |
2671 | } | 2818 | } |
2672 | 2819 | ||
2820 | void | ||
2821 | bna_rx_vlan_strip_enable(struct bna_rx *rx) | ||
2822 | { | ||
2823 | struct bna_rxf *rxf = &rx->rxf; | ||
2824 | |||
2825 | if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) { | ||
2826 | rxf->vlan_strip_status = BNA_STATUS_T_ENABLED; | ||
2827 | rxf->vlan_strip_pending = true; | ||
2828 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); | ||
2829 | } | ||
2830 | } | ||
2831 | |||
2832 | void | ||
2833 | bna_rx_vlan_strip_disable(struct bna_rx *rx) | ||
2834 | { | ||
2835 | struct bna_rxf *rxf = &rx->rxf; | ||
2836 | |||
2837 | if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) { | ||
2838 | rxf->vlan_strip_status = BNA_STATUS_T_DISABLED; | ||
2839 | rxf->vlan_strip_pending = true; | ||
2840 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); | ||
2841 | } | ||
2842 | } | ||
2843 | |||
2673 | enum bna_cb_status | 2844 | enum bna_cb_status |
2674 | bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, | 2845 | bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, |
2675 | enum bna_rxmode bitmask, | 2846 | enum bna_rxmode bitmask, |
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index dc50f7836b6d..621547cd3504 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h | |||
@@ -109,20 +109,21 @@ enum bna_tx_res_req_type { | |||
109 | enum bna_rx_mem_type { | 109 | enum bna_rx_mem_type { |
110 | BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */ | 110 | BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */ |
111 | BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */ | 111 | BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */ |
112 | BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */ | 112 | BNA_RX_RES_MEM_T_UNMAPHQ = 2, |
113 | BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */ | 113 | BNA_RX_RES_MEM_T_UNMAPDQ = 3, |
114 | BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */ | 114 | BNA_RX_RES_MEM_T_CQPT = 4, |
115 | BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */ | 115 | BNA_RX_RES_MEM_T_CSWQPT = 5, |
116 | BNA_RX_RES_MEM_T_HQPT = 6, /* RX QPT */ | 116 | BNA_RX_RES_MEM_T_CQPT_PAGE = 6, |
117 | BNA_RX_RES_MEM_T_DQPT = 7, /* RX QPT */ | 117 | BNA_RX_RES_MEM_T_HQPT = 7, |
118 | BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX s/w QPT */ | 118 | BNA_RX_RES_MEM_T_DQPT = 8, |
119 | BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX s/w QPT */ | 119 | BNA_RX_RES_MEM_T_HSWQPT = 9, |
120 | BNA_RX_RES_MEM_T_DPAGE = 10, /* RX s/w QPT */ | 120 | BNA_RX_RES_MEM_T_DSWQPT = 10, |
121 | BNA_RX_RES_MEM_T_HPAGE = 11, /* RX s/w QPT */ | 121 | BNA_RX_RES_MEM_T_DPAGE = 11, |
122 | BNA_RX_RES_MEM_T_IBIDX = 12, | 122 | BNA_RX_RES_MEM_T_HPAGE = 12, |
123 | BNA_RX_RES_MEM_T_RIT = 13, | 123 | BNA_RX_RES_MEM_T_IBIDX = 13, |
124 | BNA_RX_RES_T_INTR = 14, /* Rx interrupts */ | 124 | BNA_RX_RES_MEM_T_RIT = 14, |
125 | BNA_RX_RES_T_MAX = 15 | 125 | BNA_RX_RES_T_INTR = 15, |
126 | BNA_RX_RES_T_MAX = 16 | ||
126 | }; | 127 | }; |
127 | 128 | ||
128 | enum bna_tx_type { | 129 | enum bna_tx_type { |
@@ -583,6 +584,8 @@ struct bna_rxq { | |||
583 | 584 | ||
584 | int buffer_size; | 585 | int buffer_size; |
585 | int q_depth; | 586 | int q_depth; |
587 | u32 num_vecs; | ||
588 | enum bna_status multi_buffer; | ||
586 | 589 | ||
587 | struct bna_qpt qpt; | 590 | struct bna_qpt qpt; |
588 | struct bna_rcb *rcb; | 591 | struct bna_rcb *rcb; |
@@ -632,6 +635,8 @@ struct bna_ccb { | |||
632 | struct bna_rcb *rcb[2]; | 635 | struct bna_rcb *rcb[2]; |
633 | void *ctrl; /* For bnad */ | 636 | void *ctrl; /* For bnad */ |
634 | struct bna_pkt_rate pkt_rate; | 637 | struct bna_pkt_rate pkt_rate; |
638 | u32 pkts_una; | ||
639 | u32 bytes_per_intr; | ||
635 | 640 | ||
636 | /* Control path */ | 641 | /* Control path */ |
637 | struct bna_cq *cq; | 642 | struct bna_cq *cq; |
@@ -671,14 +676,22 @@ struct bna_rx_config { | |||
671 | int num_paths; | 676 | int num_paths; |
672 | enum bna_rxp_type rxp_type; | 677 | enum bna_rxp_type rxp_type; |
673 | int paused; | 678 | int paused; |
674 | int q_depth; | ||
675 | int coalescing_timeo; | 679 | int coalescing_timeo; |
676 | /* | 680 | /* |
677 | * Small/Large (or Header/Data) buffer size to be configured | 681 | * Small/Large (or Header/Data) buffer size to be configured |
678 | * for SLR and HDS queue types. Large buffer size comes from | 682 | * for SLR and HDS queue types. |
679 | * enet->mtu. | ||
680 | */ | 683 | */ |
681 | int small_buff_size; | 684 | u32 frame_size; |
685 | |||
686 | /* header or small queue */ | ||
687 | u32 q1_depth; | ||
688 | u32 q1_buf_size; | ||
689 | |||
690 | /* data or large queue */ | ||
691 | u32 q0_depth; | ||
692 | u32 q0_buf_size; | ||
693 | u32 q0_num_vecs; | ||
694 | enum bna_status q0_multi_buf; | ||
682 | 695 | ||
683 | enum bna_status rss_status; | 696 | enum bna_status rss_status; |
684 | struct bna_rss_config rss_config; | 697 | struct bna_rss_config rss_config; |
@@ -866,8 +879,9 @@ struct bna_rx_mod { | |||
866 | /* CAM */ | 879 | /* CAM */ |
867 | 880 | ||
868 | struct bna_ucam_mod { | 881 | struct bna_ucam_mod { |
869 | struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */ | 882 | struct bna_mac *ucmac; /* num_ucmac * 2 entries */ |
870 | struct list_head free_q; | 883 | struct list_head free_q; |
884 | struct list_head del_q; | ||
871 | 885 | ||
872 | struct bna *bna; | 886 | struct bna *bna; |
873 | }; | 887 | }; |
@@ -880,9 +894,10 @@ struct bna_mcam_handle { | |||
880 | }; | 894 | }; |
881 | 895 | ||
882 | struct bna_mcam_mod { | 896 | struct bna_mcam_mod { |
883 | struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */ | 897 | struct bna_mac *mcmac; /* num_mcmac * 2 entries */ |
884 | struct bna_mcam_handle *mchandle; /* BFI_MAX_MCMAC entries */ | 898 | struct bna_mcam_handle *mchandle; /* num_mcmac entries */ |
885 | struct list_head free_q; | 899 | struct list_head free_q; |
900 | struct list_head del_q; | ||
886 | struct list_head free_handle_q; | 901 | struct list_head free_handle_q; |
887 | 902 | ||
888 | struct bna *bna; | 903 | struct bna *bna; |
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index f1a516bc31c1..d31524f9d062 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c | |||
@@ -142,7 +142,8 @@ bnad_tx_buff_unmap(struct bnad *bnad, | |||
142 | 142 | ||
143 | dma_unmap_page(&bnad->pcidev->dev, | 143 | dma_unmap_page(&bnad->pcidev->dev, |
144 | dma_unmap_addr(&unmap->vectors[vector], dma_addr), | 144 | dma_unmap_addr(&unmap->vectors[vector], dma_addr), |
145 | skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE); | 145 | dma_unmap_len(&unmap->vectors[vector], dma_len), |
146 | DMA_TO_DEVICE); | ||
146 | dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0); | 147 | dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0); |
147 | nvecs--; | 148 | nvecs--; |
148 | } | 149 | } |
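The old unmap path re-read the fragment length from skb_shinfo() -- and with the wrong index, frags[nvecs] instead of the vector being unmapped. The fix records the mapped length alongside the address (see the unmap->vectors[vect_id].dma_len assignment in the bnad_start_xmit() hunk below) and reads it back through the kernel's dma_unmap_len() accessor. A sketch of the bookkeeping, with plain fields standing in for the dma_unmap_addr()/dma_unmap_len() macros:

    #include <stdint.h>
    #include <stdio.h>

    struct vec_unmap {
        uint64_t dma_addr;
        uint32_t dma_len;
    };

    int main(void)
    {
        struct vec_unmap v;

        /* map time: record both the address and the length */
        v.dma_addr = 0x1000;
        v.dma_len = 4096;

        /* unmap time: use the recorded length, not the skb frag */
        printf("unmap %u bytes at %#llx\n", v.dma_len,
               (unsigned long long)v.dma_addr);
        return 0;
    }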
@@ -282,27 +283,32 @@ static int | |||
282 | bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb) | 283 | bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb) |
283 | { | 284 | { |
284 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | 285 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; |
285 | int mtu, order; | 286 | int order; |
286 | 287 | ||
287 | bnad_rxq_alloc_uninit(bnad, rcb); | 288 | bnad_rxq_alloc_uninit(bnad, rcb); |
288 | 289 | ||
289 | mtu = bna_enet_mtu_get(&bnad->bna.enet); | 290 | order = get_order(rcb->rxq->buffer_size); |
290 | order = get_order(mtu); | 291 | |
292 | unmap_q->type = BNAD_RXBUF_PAGE; | ||
291 | 293 | ||
292 | if (bna_is_small_rxq(rcb->id)) { | 294 | if (bna_is_small_rxq(rcb->id)) { |
293 | unmap_q->alloc_order = 0; | 295 | unmap_q->alloc_order = 0; |
294 | unmap_q->map_size = rcb->rxq->buffer_size; | 296 | unmap_q->map_size = rcb->rxq->buffer_size; |
295 | } else { | 297 | } else { |
296 | unmap_q->alloc_order = order; | 298 | if (rcb->rxq->multi_buffer) { |
297 | unmap_q->map_size = | 299 | unmap_q->alloc_order = 0; |
298 | (rcb->rxq->buffer_size > 2048) ? | 300 | unmap_q->map_size = rcb->rxq->buffer_size; |
299 | PAGE_SIZE << order : 2048; | 301 | unmap_q->type = BNAD_RXBUF_MULTI_BUFF; |
302 | } else { | ||
303 | unmap_q->alloc_order = order; | ||
304 | unmap_q->map_size = | ||
305 | (rcb->rxq->buffer_size > 2048) ? | ||
306 | PAGE_SIZE << order : 2048; | ||
307 | } | ||
300 | } | 308 | } |
301 | 309 | ||
302 | BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size)); | 310 | BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size)); |
303 | 311 | ||
304 | unmap_q->type = BNAD_RXBUF_PAGE; | ||
305 | |||
306 | return 0; | 312 | return 0; |
307 | } | 313 | } |
308 | 314 | ||
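For the large queue without multi-buffer, whole higher-order pages are carved into map_size chunks, and the BUG_ON() checks that map_size divides the allocation evenly. A worked example of the sizing, assuming a 4 KiB PAGE_SIZE:

    #include <stdio.h>

    int main(void)
    {
        unsigned int page_size = 4096;

        /* buffer_size 2048: get_order() gives 0 and map_size stays
         * 2048, so each page holds two buffers */
        printf("%u buffers per page\n", page_size / 2048); /* 2 */

        /* buffer_size 9022: get_order() gives 2 (a 16 KiB alloc) and
         * map_size becomes PAGE_SIZE << 2, one buffer per alloc */
        printf("%u buffers per alloc\n", (page_size << 2) / 16384);
        return 0;
    }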
@@ -345,10 +351,10 @@ bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) | |||
345 | for (i = 0; i < rcb->q_depth; i++) { | 351 | for (i = 0; i < rcb->q_depth; i++) { |
346 | struct bnad_rx_unmap *unmap = &unmap_q->unmap[i]; | 352 | struct bnad_rx_unmap *unmap = &unmap_q->unmap[i]; |
347 | 353 | ||
348 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) | 354 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) |
349 | bnad_rxq_cleanup_page(bnad, unmap); | ||
350 | else | ||
351 | bnad_rxq_cleanup_skb(bnad, unmap); | 355 | bnad_rxq_cleanup_skb(bnad, unmap); |
356 | else | ||
357 | bnad_rxq_cleanup_page(bnad, unmap); | ||
352 | } | 358 | } |
353 | bnad_rxq_alloc_uninit(bnad, rcb); | 359 | bnad_rxq_alloc_uninit(bnad, rcb); |
354 | } | 360 | } |
@@ -480,10 +486,10 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) | |||
480 | if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) | 486 | if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) |
481 | return; | 487 | return; |
482 | 488 | ||
483 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) | 489 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) |
484 | bnad_rxq_refill_page(bnad, rcb, to_alloc); | ||
485 | else | ||
486 | bnad_rxq_refill_skb(bnad, rcb, to_alloc); | 490 | bnad_rxq_refill_skb(bnad, rcb, to_alloc); |
491 | else | ||
492 | bnad_rxq_refill_page(bnad, rcb, to_alloc); | ||
487 | } | 493 | } |
488 | 494 | ||
489 | #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ | 495 | #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ |
@@ -500,72 +506,114 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) | |||
500 | #define flags_udp6 (BNA_CQ_EF_IPV6 | \ | 506 | #define flags_udp6 (BNA_CQ_EF_IPV6 | \ |
501 | BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) | 507 | BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) |
502 | 508 | ||
503 | static inline struct sk_buff * | 509 | static void |
504 | bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl, | 510 | bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb, |
505 | struct bnad_rx_unmap_q *unmap_q, | 511 | u32 sop_ci, u32 nvecs) |
506 | struct bnad_rx_unmap *unmap, | ||
507 | u32 length, u32 flags) | ||
508 | { | 512 | { |
509 | struct bnad *bnad = rx_ctrl->bnad; | 513 | struct bnad_rx_unmap_q *unmap_q; |
510 | struct sk_buff *skb; | 514 | struct bnad_rx_unmap *unmap; |
515 | u32 ci, vec; | ||
511 | 516 | ||
512 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) { | 517 | unmap_q = rcb->unmap_q; |
513 | skb = napi_get_frags(&rx_ctrl->napi); | 518 | for (vec = 0, ci = sop_ci; vec < nvecs; vec++) { |
514 | if (unlikely(!skb)) | 519 | unmap = &unmap_q->unmap[ci]; |
515 | return NULL; | 520 | BNA_QE_INDX_INC(ci, rcb->q_depth); |
521 | |||
522 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) | ||
523 | bnad_rxq_cleanup_skb(bnad, unmap); | ||
524 | else | ||
525 | bnad_rxq_cleanup_page(bnad, unmap); | ||
526 | } | ||
527 | } | ||
528 | |||
529 | static void | ||
530 | bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb, | ||
531 | u32 sop_ci, u32 nvecs, u32 last_fraglen) | ||
532 | { | ||
533 | struct bnad *bnad; | ||
534 | u32 ci, vec, len, totlen = 0; | ||
535 | struct bnad_rx_unmap_q *unmap_q; | ||
536 | struct bnad_rx_unmap *unmap; | ||
537 | |||
538 | unmap_q = rcb->unmap_q; | ||
539 | bnad = rcb->bnad; | ||
540 | |||
541 | /* prefetch header */ | ||
542 | prefetch(page_address(unmap_q->unmap[sop_ci].page) + | ||
543 | unmap_q->unmap[sop_ci].page_offset); | ||
544 | |||
545 | for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) { | ||
546 | unmap = &unmap_q->unmap[ci]; | ||
547 | BNA_QE_INDX_INC(ci, rcb->q_depth); | ||
516 | 548 | ||
517 | dma_unmap_page(&bnad->pcidev->dev, | 549 | dma_unmap_page(&bnad->pcidev->dev, |
518 | dma_unmap_addr(&unmap->vector, dma_addr), | 550 | dma_unmap_addr(&unmap->vector, dma_addr), |
519 | unmap->vector.len, DMA_FROM_DEVICE); | 551 | unmap->vector.len, DMA_FROM_DEVICE); |
552 | |||
553 | len = (vec == nvecs) ? | ||
554 | last_fraglen : unmap->vector.len; | ||
555 | totlen += len; | ||
556 | |||
520 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, | 557 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, |
521 | unmap->page, unmap->page_offset, length); | 558 | unmap->page, unmap->page_offset, len); |
522 | skb->len += length; | ||
523 | skb->data_len += length; | ||
524 | skb->truesize += length; | ||
525 | 559 | ||
526 | unmap->page = NULL; | 560 | unmap->page = NULL; |
527 | unmap->vector.len = 0; | 561 | unmap->vector.len = 0; |
528 | |||
529 | return skb; | ||
530 | } | 562 | } |
531 | 563 | ||
532 | skb = unmap->skb; | 564 | skb->len += totlen; |
533 | BUG_ON(!skb); | 565 | skb->data_len += totlen; |
566 | skb->truesize += totlen; | ||
567 | } | ||
568 | |||
569 | static inline void | ||
570 | bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb, | ||
571 | struct bnad_rx_unmap *unmap, u32 len) | ||
572 | { | ||
573 | prefetch(skb->data); | ||
534 | 574 | ||
535 | dma_unmap_single(&bnad->pcidev->dev, | 575 | dma_unmap_single(&bnad->pcidev->dev, |
536 | dma_unmap_addr(&unmap->vector, dma_addr), | 576 | dma_unmap_addr(&unmap->vector, dma_addr), |
537 | unmap->vector.len, DMA_FROM_DEVICE); | 577 | unmap->vector.len, DMA_FROM_DEVICE); |
538 | 578 | ||
539 | skb_put(skb, length); | 579 | skb_put(skb, len); |
540 | |||
541 | skb->protocol = eth_type_trans(skb, bnad->netdev); | 580 | skb->protocol = eth_type_trans(skb, bnad->netdev); |
542 | 581 | ||
543 | unmap->skb = NULL; | 582 | unmap->skb = NULL; |
544 | unmap->vector.len = 0; | 583 | unmap->vector.len = 0; |
545 | return skb; | ||
546 | } | 584 | } |
547 | 585 | ||
548 | static u32 | 586 | static u32 |
549 | bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) | 587 | bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) |
550 | { | 588 | { |
551 | struct bna_cq_entry *cq, *cmpl; | 589 | struct bna_cq_entry *cq, *cmpl, *next_cmpl; |
552 | struct bna_rcb *rcb = NULL; | 590 | struct bna_rcb *rcb = NULL; |
553 | struct bnad_rx_unmap_q *unmap_q; | 591 | struct bnad_rx_unmap_q *unmap_q; |
554 | struct bnad_rx_unmap *unmap; | 592 | struct bnad_rx_unmap *unmap = NULL; |
555 | struct sk_buff *skb; | 593 | struct sk_buff *skb = NULL; |
556 | struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; | 594 | struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; |
557 | struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl; | 595 | struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl; |
558 | u32 packets = 0, length = 0, flags, masked_flags; | 596 | u32 packets = 0, len = 0, totlen = 0; |
597 | u32 pi, vec, sop_ci = 0, nvecs = 0; | ||
598 | u32 flags, masked_flags; | ||
559 | 599 | ||
560 | prefetch(bnad->netdev); | 600 | prefetch(bnad->netdev); |
561 | 601 | ||
562 | cq = ccb->sw_q; | 602 | cq = ccb->sw_q; |
563 | cmpl = &cq[ccb->producer_index]; | 603 | cmpl = &cq[ccb->producer_index]; |
564 | 604 | ||
565 | while (cmpl->valid && (packets < budget)) { | 605 | while (packets < budget) { |
566 | packets++; | 606 | if (!cmpl->valid) |
567 | flags = ntohl(cmpl->flags); | 607 | break; |
568 | length = ntohs(cmpl->length); | 608 | /* The 'valid' field is set by the adapter, only after writing |
609 | * the other fields of completion entry. Hence, do not load | ||
610 | * other fields of completion entry *before* the 'valid' is | ||
611 | * loaded. Adding the rmb() here prevents the compiler and/or | ||
612 | * CPU from reordering the reads which would potentially result | ||
613 | * in reading stale values in completion entry. | ||
614 | */ | ||
615 | rmb(); | ||
616 | |||
569 | BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); | 617 | BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); |
570 | 618 | ||
571 | if (bna_is_small_rxq(cmpl->rxq_id)) | 619 | if (bna_is_small_rxq(cmpl->rxq_id)) |
@@ -574,25 +622,78 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) | |||
574 | rcb = ccb->rcb[0]; | 622 | rcb = ccb->rcb[0]; |
575 | 623 | ||
576 | unmap_q = rcb->unmap_q; | 624 | unmap_q = rcb->unmap_q; |
577 | unmap = &unmap_q->unmap[rcb->consumer_index]; | ||
578 | 625 | ||
579 | if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | | 626 | /* start of packet ci */ |
580 | BNA_CQ_EF_FCS_ERROR | | 627 | sop_ci = rcb->consumer_index; |
581 | BNA_CQ_EF_TOO_LONG))) { | 628 | |
582 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) | 629 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) { |
583 | bnad_rxq_cleanup_page(bnad, unmap); | 630 | unmap = &unmap_q->unmap[sop_ci]; |
584 | else | 631 | skb = unmap->skb; |
585 | bnad_rxq_cleanup_skb(bnad, unmap); | 632 | } else { |
633 | skb = napi_get_frags(&rx_ctrl->napi); | ||
634 | if (unlikely(!skb)) | ||
635 | break; | ||
636 | } | ||
637 | prefetch(skb); | ||
638 | |||
639 | flags = ntohl(cmpl->flags); | ||
640 | len = ntohs(cmpl->length); | ||
641 | totlen = len; | ||
642 | nvecs = 1; | ||
586 | 643 | ||
644 | /* Check all the completions for this frame. | ||
645 | * busy-waiting doesn't help much, so break here. | ||
646 | */ | ||
647 | if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) && | ||
648 | (flags & BNA_CQ_EF_EOP) == 0) { | ||
649 | pi = ccb->producer_index; | ||
650 | do { | ||
651 | BNA_QE_INDX_INC(pi, ccb->q_depth); | ||
652 | next_cmpl = &cq[pi]; | ||
653 | |||
654 | if (!next_cmpl->valid) | ||
655 | break; | ||
656 | /* The 'valid' field is set by the adapter, only | ||
657 | * after writing the other fields of completion | ||
658 | * entry. Hence, do not load other fields of | ||
659 | * completion entry *before* the 'valid' is | ||
660 | * loaded. Adding the rmb() here prevents the | ||
661 | * compiler and/or CPU from reordering the reads | ||
662 | * which would potentially result in reading | ||
663 | * stale values in completion entry. | ||
664 | */ | ||
665 | rmb(); | ||
666 | |||
667 | len = ntohs(next_cmpl->length); | ||
668 | flags = ntohl(next_cmpl->flags); | ||
669 | |||
670 | nvecs++; | ||
671 | totlen += len; | ||
672 | } while ((flags & BNA_CQ_EF_EOP) == 0); | ||
673 | |||
674 | if (!next_cmpl->valid) | ||
675 | break; | ||
676 | } | ||
677 | |||
678 | /* TODO: BNA_CQ_EF_LOCAL ? */ | ||
679 | if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | | ||
680 | BNA_CQ_EF_FCS_ERROR | | ||
681 | BNA_CQ_EF_TOO_LONG))) { | ||
682 | bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs); | ||
587 | rcb->rxq->rx_packets_with_error++; | 683 | rcb->rxq->rx_packets_with_error++; |
684 | |||
588 | goto next; | 685 | goto next; |
589 | } | 686 | } |
590 | 687 | ||
591 | skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap, | 688 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) |
592 | length, flags); | 689 | bnad_cq_setup_skb(bnad, skb, unmap, len); |
690 | else | ||
691 | bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); | ||
593 | 692 | ||
594 | if (unlikely(!skb)) | 693 | packets++; |
595 | break; | 694 | rcb->rxq->rx_packets++; |
695 | rcb->rxq->rx_bytes += totlen; | ||
696 | ccb->bytes_per_intr += totlen; | ||
596 | 697 | ||
597 | masked_flags = flags & flags_cksum_prot_mask; | 698 | masked_flags = flags & flags_cksum_prot_mask; |
598 | 699 | ||
@@ -606,21 +707,21 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) | |||
606 | else | 707 | else |
607 | skb_checksum_none_assert(skb); | 708 | skb_checksum_none_assert(skb); |
608 | 709 | ||
609 | rcb->rxq->rx_packets++; | ||
610 | rcb->rxq->rx_bytes += length; | ||
611 | |||
612 | if (flags & BNA_CQ_EF_VLAN) | 710 | if (flags & BNA_CQ_EF_VLAN) |
613 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); | 711 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); |
614 | 712 | ||
615 | if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) | 713 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) |
616 | napi_gro_frags(&rx_ctrl->napi); | ||
617 | else | ||
618 | netif_receive_skb(skb); | 714 | netif_receive_skb(skb); |
715 | else | ||
716 | napi_gro_frags(&rx_ctrl->napi); | ||
619 | 717 | ||
620 | next: | 718 | next: |
621 | cmpl->valid = 0; | 719 | BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth); |
622 | BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth); | 720 | for (vec = 0; vec < nvecs; vec++) { |
623 | BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth); | 721 | cmpl = &cq[ccb->producer_index]; |
722 | cmpl->valid = 0; | ||
723 | BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth); | ||
724 | } | ||
624 | cmpl = &cq[ccb->producer_index]; | 725 | cmpl = &cq[ccb->producer_index]; |
625 | } | 726 | } |
626 | 727 | ||
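The reworked loop checks cmpl->valid before touching any other field of the entry, with an rmb() in between, matching a producer that sets 'valid' last. A consumer-side sketch of that protocol, using C11 acquire/release in place of the kernel's rmb() and write ordering:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cq_entry {
        uint16_t length;
        uint32_t flags;
        atomic_uchar valid; /* written last by the producer */
    };

    int main(void)
    {
        struct cq_entry e = { 0, 0, 0 };

        /* producer: fill the entry, then publish it */
        e.length = 64;
        e.flags = 1u << 31;
        atomic_store_explicit(&e.valid, 1, memory_order_release);

        /* consumer: test valid first; acquire orders later reads */
        if (atomic_load_explicit(&e.valid, memory_order_acquire))
            printf("len=%u eop=%u\n", (unsigned)e.length,
                   (unsigned)(e.flags >> 31));
        return 0;
    }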
@@ -1899,8 +2000,10 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id) | |||
1899 | tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info, | 2000 | tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info, |
1900 | tx_info); | 2001 | tx_info); |
1901 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 2002 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
1902 | if (!tx) | 2003 | if (!tx) { |
2004 | err = -ENOMEM; | ||
1903 | goto err_return; | 2005 | goto err_return; |
2006 | } | ||
1904 | tx_info->tx = tx; | 2007 | tx_info->tx = tx; |
1905 | 2008 | ||
1906 | INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, | 2009 | INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, |
@@ -1911,7 +2014,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id) | |||
1911 | err = bnad_tx_msix_register(bnad, tx_info, | 2014 | err = bnad_tx_msix_register(bnad, tx_info, |
1912 | tx_id, bnad->num_txq_per_tx); | 2015 | tx_id, bnad->num_txq_per_tx); |
1913 | if (err) | 2016 | if (err) |
1914 | goto err_return; | 2017 | goto cleanup_tx; |
1915 | } | 2018 | } |
1916 | 2019 | ||
1917 | spin_lock_irqsave(&bnad->bna_lock, flags); | 2020 | spin_lock_irqsave(&bnad->bna_lock, flags); |
@@ -1920,6 +2023,12 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id) | |||
1920 | 2023 | ||
1921 | return 0; | 2024 | return 0; |
1922 | 2025 | ||
2026 | cleanup_tx: | ||
2027 | spin_lock_irqsave(&bnad->bna_lock, flags); | ||
2028 | bna_tx_destroy(tx_info->tx); | ||
2029 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | ||
2030 | tx_info->tx = NULL; | ||
2031 | tx_info->tx_id = 0; | ||
1923 | err_return: | 2032 | err_return: |
1924 | bnad_tx_res_free(bnad, res_info); | 2033 | bnad_tx_res_free(bnad, res_info); |
1925 | return err; | 2034 | return err; |
@@ -1930,6 +2039,7 @@ err_return: | |||
1930 | static void | 2039 | static void |
1931 | bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) | 2040 | bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) |
1932 | { | 2041 | { |
2042 | memset(rx_config, 0, sizeof(*rx_config)); | ||
1933 | rx_config->rx_type = BNA_RX_T_REGULAR; | 2043 | rx_config->rx_type = BNA_RX_T_REGULAR; |
1934 | rx_config->num_paths = bnad->num_rxp_per_rx; | 2044 | rx_config->num_paths = bnad->num_rxp_per_rx; |
1935 | rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; | 2045 | rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; |
@@ -1950,10 +2060,39 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) | |||
1950 | memset(&rx_config->rss_config, 0, | 2060 | memset(&rx_config->rss_config, 0, |
1951 | sizeof(rx_config->rss_config)); | 2061 | sizeof(rx_config->rss_config)); |
1952 | } | 2062 | } |
2063 | |||
2064 | rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu); | ||
2065 | rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED; | ||
2066 | |||
2067 | /* BNA_RXP_SINGLE - one data-buffer queue | ||
2068 | * BNA_RXP_SLR - one small-buffer and one large-buffer queues | ||
2069 | * BNA_RXP_HDS - one header-buffer and one data-buffer queues | ||
2070 | */ | ||
2071 | /* TODO: configurable param for queue type */ | ||
1953 | rx_config->rxp_type = BNA_RXP_SLR; | 2072 | rx_config->rxp_type = BNA_RXP_SLR; |
1954 | rx_config->q_depth = bnad->rxq_depth; | ||
1955 | 2073 | ||
1956 | rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE; | 2074 | if (BNAD_PCI_DEV_IS_CAT2(bnad) && |
2075 | rx_config->frame_size > 4096) { | ||
2076 | /* though size_routing_enable is set in SLR, | ||
2077 | * small packets may get routed to the same rxq, | ||
2078 | * so set buf_size to 2048 instead of PAGE_SIZE. | ||
2079 | */ | ||
2080 | rx_config->q0_buf_size = 2048; | ||
2081 | /* this should be a multiple of 2 */ | ||
2082 | rx_config->q0_num_vecs = 4; | ||
2083 | rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs; | ||
2084 | rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED; | ||
2085 | } else { | ||
2086 | rx_config->q0_buf_size = rx_config->frame_size; | ||
2087 | rx_config->q0_num_vecs = 1; | ||
2088 | rx_config->q0_depth = bnad->rxq_depth; | ||
2089 | } | ||
2090 | |||
2091 | /* initialize q1 for BNA_RXP_SLR/BNA_RXP_HDS */ | ||
2092 | if (rx_config->rxp_type == BNA_RXP_SLR) { | ||
2093 | rx_config->q1_depth = bnad->rxq_depth; | ||
2094 | rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; | ||
2095 | } | ||
1957 | 2096 | ||
1958 | rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED; | 2097 | rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED; |
1959 | } | 2098 | } |
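frame_size now comes from BNAD_FRAME_SIZE(mtu). The macro body is not shown in this diff, but the open-coded computation removed from bnad_open() below suggests it is the MTU plus the Ethernet, VLAN and FCS overhead:

    #include <stdio.h>

    #define ETH_HLEN    14
    #define VLAN_HLEN   4
    #define ETH_FCS_LEN 4
    /* presumed equivalent of BNAD_FRAME_SIZE() */
    #define FRAME_SIZE(mtu) (ETH_HLEN + VLAN_HLEN + (mtu) + ETH_FCS_LEN)

    int main(void)
    {
        printf("%d\n", FRAME_SIZE(1500)); /* 1522 */
        printf("%d\n", FRAME_SIZE(9000)); /* 9022: above the 4096
                                           * multi-buffer threshold */
        return 0;
    }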
@@ -1969,6 +2108,49 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id) | |||
1969 | } | 2108 | } |
1970 | 2109 | ||
1971 | /* Called with mutex_lock(&bnad->conf_mutex) held */ | 2110 | /* Called with mutex_lock(&bnad->conf_mutex) held */ |
2111 | u32 | ||
2112 | bnad_reinit_rx(struct bnad *bnad) | ||
2113 | { | ||
2114 | struct net_device *netdev = bnad->netdev; | ||
2115 | u32 err = 0, current_err = 0; | ||
2116 | u32 rx_id = 0, count = 0; | ||
2117 | unsigned long flags; | ||
2118 | |||
2119 | /* destroy and create new rx objects */ | ||
2120 | for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { | ||
2121 | if (!bnad->rx_info[rx_id].rx) | ||
2122 | continue; | ||
2123 | bnad_destroy_rx(bnad, rx_id); | ||
2124 | } | ||
2125 | |||
2126 | spin_lock_irqsave(&bnad->bna_lock, flags); | ||
2127 | bna_enet_mtu_set(&bnad->bna.enet, | ||
2128 | BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); | ||
2129 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | ||
2130 | |||
2131 | for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { | ||
2132 | count++; | ||
2133 | current_err = bnad_setup_rx(bnad, rx_id); | ||
2134 | if (current_err && !err) { | ||
2135 | err = current_err; | ||
2136 | pr_err("RXQ:%u setup failed\n", rx_id); | ||
2137 | } | ||
2138 | } | ||
2139 | |||
2140 | /* restore rx configuration */ | ||
2141 | if (bnad->rx_info[0].rx && !err) { | ||
2142 | bnad_restore_vlans(bnad, 0); | ||
2143 | bnad_enable_default_bcast(bnad); | ||
2144 | spin_lock_irqsave(&bnad->bna_lock, flags); | ||
2145 | bnad_mac_addr_set_locked(bnad, netdev->dev_addr); | ||
2146 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | ||
2147 | bnad_set_rx_mode(netdev); | ||
2148 | } | ||
2149 | |||
2150 | return count; | ||
2151 | } | ||
2152 | |||
2153 | /* Called with bnad_conf_lock() held */ | ||
1972 | void | 2154 | void |
1973 | bnad_destroy_rx(struct bnad *bnad, u32 rx_id) | 2155 | bnad_destroy_rx(struct bnad *bnad, u32 rx_id) |
1974 | { | 2156 | { |
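bnad_reinit_rx() visits every Rx even after a failure, recording only the first error and returning how many objects it attempted to set up, so a caller can treat a zero return as "nothing came back up". A standalone sketch of that error-accumulation pattern:

    #include <stdio.h>

    int main(void)
    {
        int results[] = { 0, -12, -22 }; /* per-queue setup results */
        int err = 0, count = 0;
        unsigned int i;

        for (i = 0; i < 3; i++) {
            count++;
            if (results[i] && !err)
                err = results[i]; /* first failure wins */
        }
        printf("count=%d err=%d\n", count, err); /* count=3 err=-12 */
        return 0;
    }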
@@ -2047,13 +2229,19 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id) | |||
2047 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 2229 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2048 | 2230 | ||
2049 | /* Fill Unmap Q memory requirements */ | 2231 | /* Fill Unmap Q memory requirements */ |
2050 | BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ], | 2232 | BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ], |
2051 | rx_config->num_paths + | 2233 | rx_config->num_paths, |
2052 | ((rx_config->rxp_type == BNA_RXP_SINGLE) ? | 2234 | (rx_config->q0_depth * |
2053 | 0 : rx_config->num_paths), | 2235 | sizeof(struct bnad_rx_unmap)) + |
2054 | ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) + | 2236 | sizeof(struct bnad_rx_unmap_q)); |
2055 | sizeof(struct bnad_rx_unmap_q))); | 2237 | |
2056 | 2238 | if (rx_config->rxp_type != BNA_RXP_SINGLE) { | |
2239 | BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ], | ||
2240 | rx_config->num_paths, | ||
2241 | (rx_config->q1_depth * | ||
2242 | sizeof(struct bnad_rx_unmap) + | ||
2243 | sizeof(struct bnad_rx_unmap_q))); | ||
2244 | } | ||
2057 | /* Allocate resource */ | 2245 | /* Allocate resource */ |
2058 | err = bnad_rx_res_alloc(bnad, res_info, rx_id); | 2246 | err = bnad_rx_res_alloc(bnad, res_info, rx_id); |
2059 | if (err) | 2247 | if (err) |
@@ -2548,7 +2736,6 @@ bnad_open(struct net_device *netdev) | |||
2548 | int err; | 2736 | int err; |
2549 | struct bnad *bnad = netdev_priv(netdev); | 2737 | struct bnad *bnad = netdev_priv(netdev); |
2550 | struct bna_pause_config pause_config; | 2738 | struct bna_pause_config pause_config; |
2551 | int mtu; | ||
2552 | unsigned long flags; | 2739 | unsigned long flags; |
2553 | 2740 | ||
2554 | mutex_lock(&bnad->conf_mutex); | 2741 | mutex_lock(&bnad->conf_mutex); |
@@ -2567,10 +2754,9 @@ bnad_open(struct net_device *netdev) | |||
2567 | pause_config.tx_pause = 0; | 2754 | pause_config.tx_pause = 0; |
2568 | pause_config.rx_pause = 0; | 2755 | pause_config.rx_pause = 0; |
2569 | 2756 | ||
2570 | mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN; | ||
2571 | |||
2572 | spin_lock_irqsave(&bnad->bna_lock, flags); | 2757 | spin_lock_irqsave(&bnad->bna_lock, flags); |
2573 | bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL); | 2758 | bna_enet_mtu_set(&bnad->bna.enet, |
2759 | BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); | ||
2574 | bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL); | 2760 | bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL); |
2575 | bna_enet_enable(&bnad->bna.enet); | 2761 | bna_enet_enable(&bnad->bna.enet); |
2576 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 2762 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
@@ -2624,9 +2810,6 @@ bnad_stop(struct net_device *netdev) | |||
2624 | bnad_destroy_tx(bnad, 0); | 2810 | bnad_destroy_tx(bnad, 0); |
2625 | bnad_destroy_rx(bnad, 0); | 2811 | bnad_destroy_rx(bnad, 0); |
2626 | 2812 | ||
2627 | /* These config flags are cleared in the hardware */ | ||
2628 | bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC); | ||
2629 | |||
2630 | /* Synchronize mailbox IRQ */ | 2813 | /* Synchronize mailbox IRQ */ |
2631 | bnad_mbox_irq_sync(bnad); | 2814 | bnad_mbox_irq_sync(bnad); |
2632 | 2815 | ||
@@ -2784,21 +2967,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
2784 | } | 2967 | } |
2785 | 2968 | ||
2786 | tcb = bnad->tx_info[0].tcb[txq_id]; | 2969 | tcb = bnad->tx_info[0].tcb[txq_id]; |
2787 | q_depth = tcb->q_depth; | ||
2788 | prod = tcb->producer_index; | ||
2789 | |||
2790 | unmap_q = tcb->unmap_q; | ||
2791 | 2970 | ||
2792 | /* | 2971 | /* |
2793 | * Takes care of the Tx that is scheduled between clearing the flag | 2972 | * Takes care of the Tx that is scheduled between clearing the flag |
2794 | * and the netif_tx_stop_all_queues() call. | 2973 | * and the netif_tx_stop_all_queues() call. |
2795 | */ | 2974 | */ |
2796 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { | 2975 | if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { |
2797 | dev_kfree_skb(skb); | 2976 | dev_kfree_skb(skb); |
2798 | BNAD_UPDATE_CTR(bnad, tx_skb_stopping); | 2977 | BNAD_UPDATE_CTR(bnad, tx_skb_stopping); |
2799 | return NETDEV_TX_OK; | 2978 | return NETDEV_TX_OK; |
2800 | } | 2979 | } |
2801 | 2980 | ||
2981 | q_depth = tcb->q_depth; | ||
2982 | prod = tcb->producer_index; | ||
2983 | unmap_q = tcb->unmap_q; | ||
2984 | |||
2802 | vectors = 1 + skb_shinfo(skb)->nr_frags; | 2985 | vectors = 1 + skb_shinfo(skb)->nr_frags; |
2803 | wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ | 2986 | wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ |
2804 | 2987 | ||
@@ -2863,7 +3046,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
2863 | 3046 | ||
2864 | for (i = 0, vect_id = 0; i < vectors - 1; i++) { | 3047 | for (i = 0, vect_id = 0; i < vectors - 1; i++) { |
2865 | const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; | 3048 | const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; |
2866 | u16 size = skb_frag_size(frag); | 3049 | u32 size = skb_frag_size(frag); |
2867 | 3050 | ||
2868 | if (unlikely(size == 0)) { | 3051 | if (unlikely(size == 0)) { |
2869 | /* Undo the changes starting at tcb->producer_index */ | 3052 | /* Undo the changes starting at tcb->producer_index */ |
@@ -2888,6 +3071,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
2888 | 3071 | ||
2889 | dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, | 3072 | dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, |
2890 | 0, size, DMA_TO_DEVICE); | 3073 | 0, size, DMA_TO_DEVICE); |
3074 | unmap->vectors[vect_id].dma_len = size; | ||
2891 | BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); | 3075 | BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); |
2892 | txqent->vector[vect_id].length = htons(size); | 3076 | txqent->vector[vect_id].length = htons(size); |
2893 | dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr, | 3077 | dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr, |
@@ -2911,6 +3095,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
2911 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | 3095 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) |
2912 | return NETDEV_TX_OK; | 3096 | return NETDEV_TX_OK; |
2913 | 3097 | ||
3098 | skb_tx_timestamp(skb); | ||
3099 | |||
2914 | bna_txq_prod_indx_doorbell(tcb); | 3100 | bna_txq_prod_indx_doorbell(tcb); |
2915 | smp_mb(); | 3101 | smp_mb(); |
2916 | 3102 | ||
@@ -2937,73 +3123,133 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) | |||
2937 | return stats; | 3123 | return stats; |
2938 | } | 3124 | } |
2939 | 3125 | ||
3126 | static void | ||
3127 | bnad_set_rx_ucast_fltr(struct bnad *bnad) | ||
3128 | { | ||
3129 | struct net_device *netdev = bnad->netdev; | ||
3130 | int uc_count = netdev_uc_count(netdev); | ||
3131 | enum bna_cb_status ret; | ||
3132 | u8 *mac_list; | ||
3133 | struct netdev_hw_addr *ha; | ||
3134 | int entry; | ||
3135 | |||
3136 | if (netdev_uc_empty(bnad->netdev)) { | ||
3137 | bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL); | ||
3138 | return; | ||
3139 | } | ||
3140 | |||
3141 | if (uc_count > bna_attr(&bnad->bna)->num_ucmac) | ||
3142 | goto mode_default; | ||
3143 | |||
3144 | mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC); | ||
3145 | if (mac_list == NULL) | ||
3146 | goto mode_default; | ||
3147 | |||
3148 | entry = 0; | ||
3149 | netdev_for_each_uc_addr(ha, netdev) { | ||
3150 | memcpy(&mac_list[entry * ETH_ALEN], | ||
3151 | &ha->addr[0], ETH_ALEN); | ||
3152 | entry++; | ||
3153 | } | ||
3154 | |||
3155 | ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, | ||
3156 | mac_list, NULL); | ||
3157 | kfree(mac_list); | ||
3158 | |||
3159 | if (ret != BNA_CB_SUCCESS) | ||
3160 | goto mode_default; | ||
3161 | |||
3162 | return; | ||
3163 | |||
3164 | /* ucast packets not in UCAM are routed to default function */ | ||
3165 | mode_default: | ||
3166 | bnad->cfg_flags |= BNAD_CF_DEFAULT; | ||
3167 | bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL); | ||
3168 | } | ||
3169 | |||
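bnad_set_rx_ucast_fltr() above is a fallback ladder: an empty list clears the UCAM, a list that fits is programmed exactly, and anything else (CAM overflow, allocation failure, or a rejected listset) degrades to BNAD_CF_DEFAULT so unmatched unicast frames are still delivered via the default function. A small userspace model of that decision, with hypothetical names standing in for the driver state:

#include <stdio.h>
#include <stdlib.h>

enum fltr_result { FLTR_CLEARED, FLTR_EXACT, FLTR_DEFAULT };

/* Hypothetical stand-ins for bna_attr()->num_ucmac and the
 * bna_rx_ucast_listset() outcome.
 */
static enum fltr_result set_ucast_fltr(int uc_count, int cam_size,
				       int listset_ok)
{
	unsigned char *mac_list;

	if (uc_count == 0)
		return FLTR_CLEARED;		/* clear the CAM */
	if (uc_count > cam_size)
		return FLTR_DEFAULT;		/* CAM overflow */

	mac_list = calloc(uc_count, 6);		/* ETH_ALEN == 6 */
	if (!mac_list)
		return FLTR_DEFAULT;		/* allocation failure */
	/* ... copy addresses and program the CAM here ... */
	free(mac_list);

	return listset_ok ? FLTR_EXACT : FLTR_DEFAULT;
}

int main(void)
{
	printf("%d %d %d\n",
	       set_ucast_fltr(0, 16, 1),	/* FLTR_CLEARED */
	       set_ucast_fltr(4, 16, 1),	/* FLTR_EXACT */
	       set_ucast_fltr(32, 16, 1));	/* FLTR_DEFAULT */
	return 0;
}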
3170 | static void | ||
3171 | bnad_set_rx_mcast_fltr(struct bnad *bnad) | ||
3172 | { | ||
3173 | struct net_device *netdev = bnad->netdev; | ||
3174 | int mc_count = netdev_mc_count(netdev); | ||
3175 | enum bna_cb_status ret; | ||
3176 | u8 *mac_list; | ||
3177 | |||
3178 | if (netdev->flags & IFF_ALLMULTI) | ||
3179 | goto mode_allmulti; | ||
3180 | |||
3181 | if (netdev_mc_empty(netdev)) | ||
3182 | return; | ||
3183 | |||
3184 | if (mc_count > bna_attr(&bnad->bna)->num_mcmac) | ||
3185 | goto mode_allmulti; | ||
3186 | |||
3187 | mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC); | ||
3188 | |||
3189 | if (mac_list == NULL) | ||
3190 | goto mode_allmulti; | ||
3191 | |||
3192 | memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN); | ||
3193 | |||
3194 | /* copy rest of the MCAST addresses */ | ||
3195 | bnad_netdev_mc_list_get(netdev, mac_list); | ||
3196 | ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, | ||
3197 | mac_list, NULL); | ||
3198 | kfree(mac_list); | ||
3199 | |||
3200 | if (ret != BNA_CB_SUCCESS) | ||
3201 | goto mode_allmulti; | ||
3202 | |||
3203 | return; | ||
3204 | |||
3205 | mode_allmulti: | ||
3206 | bnad->cfg_flags |= BNAD_CF_ALLMULTI; | ||
3207 | bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL); | ||
3208 | } | ||
3209 | |||
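The multicast path follows the same ladder with IFF_ALLMULTI as its fallback, and it allocates mc_count + 1 entries because slot 0 of the list is reserved for the broadcast address (bnad_bcast_addr) before bnad_netdev_mc_list_get() appends the real multicast addresses.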
2940 | void | 3210 | void |
2941 | bnad_set_rx_mode(struct net_device *netdev) | 3211 | bnad_set_rx_mode(struct net_device *netdev) |
2942 | { | 3212 | { |
2943 | struct bnad *bnad = netdev_priv(netdev); | 3213 | struct bnad *bnad = netdev_priv(netdev); |
2944 | u32 new_mask, valid_mask; | 3214 | enum bna_rxmode new_mode, mode_mask; |
2945 | unsigned long flags; | 3215 | unsigned long flags; |
2946 | 3216 | ||
2947 | spin_lock_irqsave(&bnad->bna_lock, flags); | 3217 | spin_lock_irqsave(&bnad->bna_lock, flags); |
2948 | 3218 | ||
2949 | new_mask = valid_mask = 0; | 3219 | if (bnad->rx_info[0].rx == NULL) { |
2950 | 3220 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2951 | if (netdev->flags & IFF_PROMISC) { | 3221 | return; |
2952 | if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) { | ||
2953 | new_mask = BNAD_RXMODE_PROMISC_DEFAULT; | ||
2954 | valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; | ||
2955 | bnad->cfg_flags |= BNAD_CF_PROMISC; | ||
2956 | } | ||
2957 | } else { | ||
2958 | if (bnad->cfg_flags & BNAD_CF_PROMISC) { | ||
2959 | new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT; | ||
2960 | valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; | ||
2961 | bnad->cfg_flags &= ~BNAD_CF_PROMISC; | ||
2962 | } | ||
2963 | } | ||
2964 | |||
2965 | if (netdev->flags & IFF_ALLMULTI) { | ||
2966 | if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) { | ||
2967 | new_mask |= BNA_RXMODE_ALLMULTI; | ||
2968 | valid_mask |= BNA_RXMODE_ALLMULTI; | ||
2969 | bnad->cfg_flags |= BNAD_CF_ALLMULTI; | ||
2970 | } | ||
2971 | } else { | ||
2972 | if (bnad->cfg_flags & BNAD_CF_ALLMULTI) { | ||
2973 | new_mask &= ~BNA_RXMODE_ALLMULTI; | ||
2974 | valid_mask |= BNA_RXMODE_ALLMULTI; | ||
2975 | bnad->cfg_flags &= ~BNAD_CF_ALLMULTI; | ||
2976 | } | ||
2977 | } | 3222 | } |
2978 | 3223 | ||
2979 | if (bnad->rx_info[0].rx == NULL) | 3224 | /* clear bnad flags to update it with new settings */ |
2980 | goto unlock; | 3225 | bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT | |
3226 | BNAD_CF_ALLMULTI); | ||
2981 | 3227 | ||
2982 | bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL); | 3228 | new_mode = 0; |
3229 | if (netdev->flags & IFF_PROMISC) { | ||
3230 | new_mode |= BNAD_RXMODE_PROMISC_DEFAULT; | ||
3231 | bnad->cfg_flags |= BNAD_CF_PROMISC; | ||
3232 | } else { | ||
3233 | bnad_set_rx_mcast_fltr(bnad); | ||
2983 | 3234 | ||
2984 | if (!netdev_mc_empty(netdev)) { | 3235 | if (bnad->cfg_flags & BNAD_CF_ALLMULTI) |
2985 | u8 *mcaddr_list; | 3236 | new_mode |= BNA_RXMODE_ALLMULTI; |
2986 | int mc_count = netdev_mc_count(netdev); | ||
2987 | 3237 | ||
2988 | /* Index 0 holds the broadcast address */ | 3238 | bnad_set_rx_ucast_fltr(bnad); |
2989 | mcaddr_list = | ||
2990 | kzalloc((mc_count + 1) * ETH_ALEN, | ||
2991 | GFP_ATOMIC); | ||
2992 | if (!mcaddr_list) | ||
2993 | goto unlock; | ||
2994 | 3239 | ||
2995 | memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN); | 3240 | if (bnad->cfg_flags & BNAD_CF_DEFAULT) |
3241 | new_mode |= BNA_RXMODE_DEFAULT; | ||
3242 | } | ||
2996 | 3243 | ||
2997 | /* Copy rest of the MC addresses */ | 3244 | mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT | |
2998 | bnad_netdev_mc_list_get(netdev, mcaddr_list); | 3245 | BNA_RXMODE_ALLMULTI; |
3246 | bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL); | ||
2999 | 3247 | ||
3000 | bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, | 3248 | if (bnad->cfg_flags & BNAD_CF_PROMISC) |
3001 | mcaddr_list, NULL); | 3249 | bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); |
3250 | else | ||
3251 | bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); | ||
3002 | 3252 | ||
3003 | /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */ | ||
3004 | kfree(mcaddr_list); | ||
3005 | } | ||
3006 | unlock: | ||
3007 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 3253 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
3008 | } | 3254 | } |
3009 | 3255 | ||
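The rewritten bnad_set_rx_mode() recomputes the whole mode word from scratch instead of toggling individual bits against the previous state, which is what made the old new_mask/valid_mask bookkeeping error-prone. A userspace sketch of the bit assembly, assuming single-bit mode values and that BNAD_RXMODE_PROMISC_DEFAULT is promisc-plus-default (the real values are enum bna_rxmode in the bna headers):

#include <stdio.h>

/* Assumed single-bit mode values; stand-ins for enum bna_rxmode. */
#define RXMODE_PROMISC	0x1
#define RXMODE_DEFAULT	0x2
#define RXMODE_ALLMULTI	0x4

#define DEMO_IFF_PROMISC	0x100	/* stand-ins for netdev->flags */
#define DEMO_IFF_ALLMULTI	0x200

static unsigned int compute_mode(unsigned int ifflags, int ucam_overflow)
{
	unsigned int mode = 0;

	if (ifflags & DEMO_IFF_PROMISC) {
		mode |= RXMODE_PROMISC | RXMODE_DEFAULT;
	} else {
		if (ifflags & DEMO_IFF_ALLMULTI)
			mode |= RXMODE_ALLMULTI;
		if (ucam_overflow)	/* BNAD_CF_DEFAULT fallback */
			mode |= RXMODE_DEFAULT;
	}
	return mode;
}

int main(void)
{
	printf("promisc:  0x%x\n", compute_mode(DEMO_IFF_PROMISC, 0));
	printf("allmulti: 0x%x\n", compute_mode(DEMO_IFF_ALLMULTI, 0));
	printf("overflow: 0x%x\n", compute_mode(0, 1));
	return 0;
}

The promisc branch also pairs with the bna_rx_vlan_strip_disable() call above, plausibly so that a sniffing interface sees frames with their VLAN tags intact.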
@@ -3033,14 +3279,14 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr) | |||
3033 | } | 3279 | } |
3034 | 3280 | ||
3035 | static int | 3281 | static int |
3036 | bnad_mtu_set(struct bnad *bnad, int mtu) | 3282 | bnad_mtu_set(struct bnad *bnad, int frame_size) |
3037 | { | 3283 | { |
3038 | unsigned long flags; | 3284 | unsigned long flags; |
3039 | 3285 | ||
3040 | init_completion(&bnad->bnad_completions.mtu_comp); | 3286 | init_completion(&bnad->bnad_completions.mtu_comp); |
3041 | 3287 | ||
3042 | spin_lock_irqsave(&bnad->bna_lock, flags); | 3288 | spin_lock_irqsave(&bnad->bna_lock, flags); |
3043 | bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set); | 3289 | bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set); |
3044 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 3290 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
3045 | 3291 | ||
3046 | wait_for_completion(&bnad->bnad_completions.mtu_comp); | 3292 | wait_for_completion(&bnad->bnad_completions.mtu_comp); |
@@ -3051,18 +3297,34 @@ bnad_mtu_set(struct bnad *bnad, int mtu) | |||
3051 | static int | 3297 | static int |
3052 | bnad_change_mtu(struct net_device *netdev, int new_mtu) | 3298 | bnad_change_mtu(struct net_device *netdev, int new_mtu) |
3053 | { | 3299 | { |
3054 | int err, mtu = netdev->mtu; | 3300 | int err, mtu; |
3055 | struct bnad *bnad = netdev_priv(netdev); | 3301 | struct bnad *bnad = netdev_priv(netdev); |
3302 | u32 rx_count = 0, frame, new_frame; | ||
3056 | 3303 | ||
3057 | if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU) | 3304 | if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU) |
3058 | return -EINVAL; | 3305 | return -EINVAL; |
3059 | 3306 | ||
3060 | mutex_lock(&bnad->conf_mutex); | 3307 | mutex_lock(&bnad->conf_mutex); |
3061 | 3308 | ||
3309 | mtu = netdev->mtu; | ||
3062 | netdev->mtu = new_mtu; | 3310 | netdev->mtu = new_mtu; |
3063 | 3311 | ||
3064 | mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN; | 3312 | frame = BNAD_FRAME_SIZE(mtu); |
3065 | err = bnad_mtu_set(bnad, mtu); | 3313 | new_frame = BNAD_FRAME_SIZE(new_mtu); |
3314 | |||
3315 | /* check if multi-buffer needs to be enabled */ | ||
3316 | if (BNAD_PCI_DEV_IS_CAT2(bnad) && | ||
3317 | netif_running(bnad->netdev)) { | ||
3318 | /* only when transition is over 4K */ | ||
3319 | if ((frame <= 4096 && new_frame > 4096) || | ||
3320 | (frame > 4096 && new_frame <= 4096)) | ||
3321 | rx_count = bnad_reinit_rx(bnad); | ||
3322 | } | ||
3323 | |||
3324 | /* rx_count > 0 - new rx created | ||
3325 | * - Linux set err = 0 and return | ||
3326 | */ | ||
3327 | err = bnad_mtu_set(bnad, new_frame); | ||
3066 | if (err) | 3328 | if (err) |
3067 | err = -EBUSY; | 3329 | err = -EBUSY; |
3068 | 3330 | ||
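The change_mtu path above only rebuilds the Rx queues (bnad_reinit_rx()) when the wire frame size crosses the 4096-byte boundary on a CAT2 device, the point at which a frame stops fitting in a single Rx buffer and multi-buffer Rx has to be toggled. A userspace check of the transition test, reusing the BNAD_FRAME_SIZE() arithmetic from bnad.h:

#include <stdio.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define ETH_FCS_LEN	4
#define BNAD_FRAME_SIZE(_mtu) \
	(ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)

/* Mirrors the condition in bnad_change_mtu(): reinit only when the
 * frame size crosses 4K in either direction.
 */
static int needs_rx_reinit(int old_mtu, int new_mtu)
{
	unsigned int frame = BNAD_FRAME_SIZE(old_mtu);
	unsigned int new_frame = BNAD_FRAME_SIZE(new_mtu);

	return (frame <= 4096 && new_frame > 4096) ||
	       (frame > 4096 && new_frame <= 4096);
}

int main(void)
{
	printf("1500 -> 9000: %d\n", needs_rx_reinit(1500, 9000));	/* 1 */
	printf("1500 -> 4000: %d\n", needs_rx_reinit(1500, 4000));	/* 0 */
	printf("9000 -> 1500: %d\n", needs_rx_reinit(9000, 1500));	/* 1 */
	return 0;
}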
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index 4137aaad3ab1..2842c188e0da 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h | |||
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl { | |||
71 | #define BNAD_NAME "bna" | 71 | #define BNAD_NAME "bna" |
72 | #define BNAD_NAME_LEN 64 | 72 | #define BNAD_NAME_LEN 64 |
73 | 73 | ||
74 | #define BNAD_VERSION "3.2.21.1" | 74 | #define BNAD_VERSION "3.2.23.0" |
75 | 75 | ||
76 | #define BNAD_MAILBOX_MSIX_INDEX 0 | 76 | #define BNAD_MAILBOX_MSIX_INDEX 0 |
77 | #define BNAD_MAILBOX_MSIX_VECTORS 1 | 77 | #define BNAD_MAILBOX_MSIX_VECTORS 1 |
@@ -84,7 +84,7 @@ struct bnad_rx_ctrl { | |||
84 | #define BNAD_IOCETH_TIMEOUT 10000 | 84 | #define BNAD_IOCETH_TIMEOUT 10000 |
85 | 85 | ||
86 | #define BNAD_MIN_Q_DEPTH 512 | 86 | #define BNAD_MIN_Q_DEPTH 512 |
87 | #define BNAD_MAX_RXQ_DEPTH 2048 | 87 | #define BNAD_MAX_RXQ_DEPTH 16384 |
88 | #define BNAD_MAX_TXQ_DEPTH 2048 | 88 | #define BNAD_MAX_TXQ_DEPTH 2048 |
89 | 89 | ||
90 | #define BNAD_JUMBO_MTU 9000 | 90 | #define BNAD_JUMBO_MTU 9000 |
@@ -105,6 +105,9 @@ struct bnad_rx_ctrl { | |||
105 | #define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx) | 105 | #define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx) |
106 | #define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx) | 106 | #define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx) |
107 | 107 | ||
108 | #define BNAD_FRAME_SIZE(_mtu) \ | ||
109 | (ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN) | ||
110 | |||
108 | /* | 111 | /* |
109 | * DATA STRUCTURES | 112 | * DATA STRUCTURES |
110 | */ | 113 | */ |
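With the standard header sizes (ETH_HLEN 14, VLAN_HLEN 4, ETH_FCS_LEN 4), BNAD_FRAME_SIZE(1500) = 14 + 4 + 1500 + 4 = 1522 and BNAD_FRAME_SIZE(9000) = 9022; this on-wire frame size, not the bare MTU, is what bnad_mtu_set() now hands to bna_enet_mtu_set().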
@@ -219,6 +222,7 @@ struct bnad_rx_info { | |||
219 | 222 | ||
220 | struct bnad_tx_vector { | 223 | struct bnad_tx_vector { |
221 | DEFINE_DMA_UNMAP_ADDR(dma_addr); | 224 | DEFINE_DMA_UNMAP_ADDR(dma_addr); |
225 | DEFINE_DMA_UNMAP_LEN(dma_len); | ||
222 | }; | 226 | }; |
223 | 227 | ||
224 | struct bnad_tx_unmap { | 228 | struct bnad_tx_unmap { |
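DEFINE_DMA_UNMAP_LEN() adds a length slot next to the existing address slot; both compile away to nothing on architectures that do not need DMA unmap state. A hedged kernel-C sketch (not a standalone program; only the dma_* helpers are real kernel API, the struct and function names are illustrative) of how the pair is consumed at TX-completion time:

#include <linux/dma-mapping.h>

struct demo_tx_vector {
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

/* Reads back the address stored with dma_unmap_addr_set() and the
 * length recorded in the new dma_len field, so the fragment can be
 * unmapped with exactly the size it was mapped with.
 */
static void demo_unmap_frag(struct device *dev, struct demo_tx_vector *v)
{
	dma_unmap_page(dev, dma_unmap_addr(v, dma_addr),
		       dma_unmap_len(v, dma_len), DMA_TO_DEVICE);
}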
@@ -234,33 +238,38 @@ struct bnad_rx_vector { | |||
234 | 238 | ||
235 | struct bnad_rx_unmap { | 239 | struct bnad_rx_unmap { |
236 | struct page *page; | 240 | struct page *page; |
237 | u32 page_offset; | ||
238 | struct sk_buff *skb; | 241 | struct sk_buff *skb; |
239 | struct bnad_rx_vector vector; | 242 | struct bnad_rx_vector vector; |
243 | u32 page_offset; | ||
240 | }; | 244 | }; |
241 | 245 | ||
242 | enum bnad_rxbuf_type { | 246 | enum bnad_rxbuf_type { |
243 | BNAD_RXBUF_NONE = 0, | 247 | BNAD_RXBUF_NONE = 0, |
244 | BNAD_RXBUF_SKB = 1, | 248 | BNAD_RXBUF_SK_BUFF = 1, |
245 | BNAD_RXBUF_PAGE = 2, | 249 | BNAD_RXBUF_PAGE = 2, |
246 | BNAD_RXBUF_MULTI = 3 | 250 | BNAD_RXBUF_MULTI_BUFF = 3 |
247 | }; | 251 | }; |
248 | 252 | ||
249 | #define BNAD_RXBUF_IS_PAGE(_type) ((_type) == BNAD_RXBUF_PAGE) | 253 | #define BNAD_RXBUF_IS_SK_BUFF(_type) ((_type) == BNAD_RXBUF_SK_BUFF) |
254 | #define BNAD_RXBUF_IS_MULTI_BUFF(_type) ((_type) == BNAD_RXBUF_MULTI_BUFF) | ||
250 | 255 | ||
251 | struct bnad_rx_unmap_q { | 256 | struct bnad_rx_unmap_q { |
252 | int reuse_pi; | 257 | int reuse_pi; |
253 | int alloc_order; | 258 | int alloc_order; |
254 | u32 map_size; | 259 | u32 map_size; |
255 | enum bnad_rxbuf_type type; | 260 | enum bnad_rxbuf_type type; |
256 | struct bnad_rx_unmap unmap[0]; | 261 | struct bnad_rx_unmap unmap[0] ____cacheline_aligned; |
257 | }; | 262 | }; |
258 | 263 | ||
264 | #define BNAD_PCI_DEV_IS_CAT2(_bnad) \ | ||
265 | ((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2) | ||
266 | |||
259 | /* Bit mask values for bnad->cfg_flags */ | 267 | /* Bit mask values for bnad->cfg_flags */ |
260 | #define BNAD_CF_DIM_ENABLED 0x01 /* DIM */ | 268 | #define BNAD_CF_DIM_ENABLED 0x01 /* DIM */ |
261 | #define BNAD_CF_PROMISC 0x02 | 269 | #define BNAD_CF_PROMISC 0x02 |
262 | #define BNAD_CF_ALLMULTI 0x04 | 270 | #define BNAD_CF_ALLMULTI 0x04 |
263 | #define BNAD_CF_MSIX 0x08 /* If in MSIx mode */ | 271 | #define BNAD_CF_DEFAULT 0x08 |
272 | #define BNAD_CF_MSIX 0x10 /* If in MSIx mode */ | ||
264 | 273 | ||
265 | /* Defines for run_flags bit-mask */ | 274 | /* Defines for run_flags bit-mask */ |
266 | /* Set, tested & cleared using xxx_bit() functions */ | 275 | /* Set, tested & cleared using xxx_bit() functions */ |
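The cfg_flags values must stay distinct powers of two, so inserting BNAD_CF_DEFAULT at 0x08 forces BNAD_CF_MSIX up to 0x10; any stale test against the old 0x08 value after this patch would silently read the new DEFAULT bit instead of MSIx mode.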
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 455b5a2e59d4..f9e150825bb5 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c | |||
@@ -1131,6 +1131,7 @@ static const struct ethtool_ops bnad_ethtool_ops = { | |||
1131 | .get_eeprom = bnad_get_eeprom, | 1131 | .get_eeprom = bnad_get_eeprom, |
1132 | .set_eeprom = bnad_set_eeprom, | 1132 | .set_eeprom = bnad_set_eeprom, |
1133 | .flash_device = bnad_flash_device, | 1133 | .flash_device = bnad_flash_device, |
1134 | .get_ts_info = ethtool_op_get_ts_info, | ||
1134 | }; | 1135 | }; |
1135 | 1136 | ||
1136 | void | 1137 | void |
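ethtool_op_get_ts_info is the stock helper that advertises software TX/RX timestamping, which is exactly what the skb_tx_timestamp() call added to bnad_start_xmit() provides; the two changes belong together.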
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h index 43405f654b4a..b3ff6d507951 100644 --- a/drivers/net/ethernet/brocade/bna/cna.h +++ b/drivers/net/ethernet/brocade/bna/cna.h | |||
@@ -37,8 +37,8 @@ | |||
37 | 37 | ||
38 | extern char bfa_version[]; | 38 | extern char bfa_version[]; |
39 | 39 | ||
40 | #define CNA_FW_FILE_CT "ctfw-3.2.1.1.bin" | 40 | #define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin" |
41 | #define CNA_FW_FILE_CT2 "ct2fw-3.2.1.1.bin" | 41 | #define CNA_FW_FILE_CT2 "ct2fw-3.2.3.0.bin" |
42 | #define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ | 42 | #define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ |
43 | 43 | ||
44 | #pragma pack(1) | 44 | #pragma pack(1) |