path: root/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
author	Merav Sicron <meravs@broadcom.com>	2012-11-06 19:45:48 -0500
committer	David S. Miller <davem@davemloft.net>	2012-11-07 18:57:19 -0500
commit	55c11941e382cb26010138ab824216f47af37606 (patch)
tree	92724ef130081b47426919758c5fac4061e9e708 /drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
parent	babc6727d537199f7fbf6dfe711ae418d399b3eb (diff)
bnx2x: Support loading cnic resources at run-time
This patch replaces the BCM_CNIC define with a flag which can change at run-time and which does not use the CONFIG_CNIC kconfig option. For the PF/hypervisor driver cnic is always supported; however, allocation of cnic resources and configuration of the HW for offload mode are done only when the cnic module registers with bnx2x.

Signed-off-by: Merav Sicron <meravs@broadcom.com>
Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
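The pattern the patch introduces (compile-time #ifdef BCM_CNIC blocks becoming ordinary run-time branches) recurs throughout the diff below. The following is a minimal, hedged sketch of that gating, not the driver's actual code: struct bp_sketch and the helper names are hypothetical simplifications, and the real CNIC_SUPPORT()/CNIC_LOADED() macros live in bnx2x.h.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct bnx2x: only the two pieces of state
 * the run-time gating relies on. */
struct bp_sketch {
	int  cnic_support;	/* PF/hypervisor driver: always 1 */
	bool cnic_loaded;	/* cnic registered; resources allocated */
};

/* Sketches of the flag tests used in the diff (real ones in bnx2x.h) */
#define CNIC_SUPPORT(bp)	((bp)->cnic_support)
#define CNIC_LOADED(bp)		((bp)->cnic_loaded)

static void napi_enable_eth(void)  { puts("eth napi enabled"); }
static void napi_enable_cnic(void) { puts("cnic napi enabled"); }

/* Before: the cnic branch was compiled in or out with #ifdef BCM_CNIC.
 * After: the same path is a run-time test, as in the bnx2x_netif_start()
 * hunk below. */
static void netif_start_sketch(const struct bp_sketch *bp)
{
	napi_enable_eth();
	if (CNIC_LOADED(bp))
		napi_enable_cnic();
}

int main(void)
{
	struct bp_sketch bp = { .cnic_support = 1, .cnic_loaded = false };

	netif_start_sketch(&bp);	/* cnic not yet registered: eth only */
	bp.cnic_loaded = true;		/* cnic module registered with bnx2x */
	netif_start_sketch(&bp);	/* now the cnic queues come up too */
	return 0;
}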
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c')
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c	460
1 file changed, 313 insertions(+), 147 deletions(-)
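A second recurring change below is the MSI-X vector accounting: the compile-time constant CNIC_PRESENT is replaced by the run-time CNIC_SUPPORT(bp), so the driver requests one slowpath vector, one optional cnic vector, and one vector per ethernet queue. A small self-contained sketch of that arithmetic follows (the function and variable names here are illustrative, not the driver's):

#include <stdio.h>

/* Mirrors req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1
 * from the bnx2x_enable_msix() hunk below; the trailing +1 is the
 * slowpath (default status block) vector. */
static int msix_req_cnt(int num_eth_queues, int cnic_support)
{
	return num_eth_queues + cnic_support + 1;
}

int main(void)
{
	printf("8 eth queues, cnic supported: %d vectors\n",
	       msix_req_cnt(8, 1));	/* prints 10 */
	printf("8 eth queues, no cnic:       %d vectors\n",
	       msix_req_cnt(8, 0));	/* prints 9 */
	return 0;
}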
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4833b6a9031c..54d522da1aa7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1152,6 +1152,25 @@ static void bnx2x_free_tpa_pool(struct bnx2x *bp,
 	}
 }
 
+void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
+{
+	int j;
+
+	for_each_rx_queue_cnic(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		fp->rx_bd_cons = 0;
+
+		/* Activate BD ring */
+		/* Warning!
+		 * this will generate an interrupt (to the TSTORM)
+		 * must only be done after chip is initialized
+		 */
+		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+				     fp->rx_sge_prod);
+	}
+}
+
 void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
@@ -1159,7 +1178,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 	int i, j;
 
 	/* Allocate TPA resources */
-	for_each_rx_queue(bp, j) {
+	for_each_eth_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		DP(NETIF_MSG_IFUP,
@@ -1217,7 +1236,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 		}
 	}
 
-	for_each_rx_queue(bp, j) {
+	for_each_eth_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		fp->rx_bd_cons = 0;
@@ -1244,29 +1263,45 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 	}
 }
 
-static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
 {
-	int i;
 	u8 cos;
+	struct bnx2x *bp = fp->bp;
 
-	for_each_tx_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-		for_each_cos_in_tx_queue(fp, cos) {
-			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
-			unsigned pkts_compl = 0, bytes_compl = 0;
+	for_each_cos_in_tx_queue(fp, cos) {
+		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+		unsigned pkts_compl = 0, bytes_compl = 0;
 
-			u16 sw_prod = txdata->tx_pkt_prod;
-			u16 sw_cons = txdata->tx_pkt_cons;
+		u16 sw_prod = txdata->tx_pkt_prod;
+		u16 sw_cons = txdata->tx_pkt_cons;
 
-			while (sw_cons != sw_prod) {
-				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
-						  &pkts_compl, &bytes_compl);
-				sw_cons++;
-			}
-			netdev_tx_reset_queue(
-				netdev_get_tx_queue(bp->dev,
-						    txdata->txq_index));
+		while (sw_cons != sw_prod) {
+			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
+					  &pkts_compl, &bytes_compl);
+			sw_cons++;
 		}
+
+		netdev_tx_reset_queue(
+			netdev_get_tx_queue(bp->dev,
+					    txdata->txq_index));
+	}
+}
+
+static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_tx_queue_cnic(bp, i) {
+		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
+	}
+}
+
+static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_eth_queue(bp, i) {
+		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
 	}
 }
 
@@ -1294,11 +1329,20 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
 	}
 }
 
+static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
+{
+	int j;
+
+	for_each_rx_queue_cnic(bp, j) {
+		bnx2x_free_rx_bds(&bp->fp[j]);
+	}
+}
+
 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 {
 	int j;
 
-	for_each_rx_queue(bp, j) {
+	for_each_eth_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		bnx2x_free_rx_bds(fp);
@@ -1308,6 +1352,12 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 	}
 }
 
+void bnx2x_free_skbs_cnic(struct bnx2x *bp)
+{
+	bnx2x_free_tx_skbs_cnic(bp);
+	bnx2x_free_rx_skbs_cnic(bp);
+}
+
 void bnx2x_free_skbs(struct bnx2x *bp)
 {
 	bnx2x_free_tx_skbs(bp);
@@ -1347,11 +1397,12 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
 	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
 	   bp->msix_table[offset].vector);
 	offset++;
-#ifdef BCM_CNIC
-	if (nvecs == offset)
-		return;
-	offset++;
-#endif
+
+	if (CNIC_SUPPORT(bp)) {
+		if (nvecs == offset)
+			return;
+		offset++;
+	}
 
 	for_each_eth_queue(bp, i) {
 		if (nvecs == offset)
@@ -1368,7 +1419,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
 	if (bp->flags & USING_MSIX_FLAG &&
 	    !(bp->flags & USING_SINGLE_MSIX_FLAG))
 		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
-				     CNIC_PRESENT + 1);
+				     CNIC_SUPPORT(bp) + 1);
 	else
 		free_irq(bp->dev->irq, bp->dev);
 }
@@ -1382,12 +1433,14 @@ int bnx2x_enable_msix(struct bnx2x *bp)
 		       bp->msix_table[0].entry);
 	msix_vec++;
 
-#ifdef BCM_CNIC
-	bp->msix_table[msix_vec].entry = msix_vec;
-	BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
-		       bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
-	msix_vec++;
-#endif
+	/* Cnic requires an msix vector for itself */
+	if (CNIC_SUPPORT(bp)) {
+		bp->msix_table[msix_vec].entry = msix_vec;
+		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
+			       msix_vec, bp->msix_table[msix_vec].entry);
+		msix_vec++;
+	}
 
 	/* We need separate vectors for ETH queues only (not FCoE) */
 	for_each_eth_queue(bp, i) {
 		bp->msix_table[msix_vec].entry = msix_vec;
@@ -1396,7 +1449,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
 		msix_vec++;
 	}
 
-	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
+	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
 
 	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
 
@@ -1404,7 +1457,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
 	 * reconfigure number of tx/rx queues according to available
 	 * MSI-X vectors
 	 */
-	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+	if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
 		/* how less vectors we will have? */
 		int diff = req_cnt - rc;
 
@@ -1419,7 +1472,8 @@ int bnx2x_enable_msix(struct bnx2x *bp)
 		/*
 		 * decrease number of queues by number of unallocated entries
 		 */
-		bp->num_queues -= diff;
+		bp->num_ethernet_queues -= diff;
+		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
 
 		BNX2X_DEV_INFO("New queue configuration set: %d\n",
 			       bp->num_queues);
@@ -1435,6 +1489,9 @@ int bnx2x_enable_msix(struct bnx2x *bp)
 		BNX2X_DEV_INFO("Using single MSI-X vector\n");
 		bp->flags |= USING_SINGLE_MSIX_FLAG;
 
+		BNX2X_DEV_INFO("set number of queues to 1\n");
+		bp->num_ethernet_queues = 1;
+		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
 	} else if (rc < 0) {
 		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
 		goto no_msix;
@@ -1464,9 +1521,9 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 		return -EBUSY;
 	}
 
-#ifdef BCM_CNIC
-	offset++;
-#endif
+	if (CNIC_SUPPORT(bp))
+		offset++;
+
 	for_each_eth_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -1485,7 +1542,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 	}
 
 	i = BNX2X_NUM_ETH_QUEUES(bp);
-	offset = 1 + CNIC_PRESENT;
+	offset = 1 + CNIC_SUPPORT(bp);
 	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
 		    bp->msix_table[0].vector,
 		    0, bp->msix_table[offset].vector,
@@ -1556,19 +1613,35 @@ static int bnx2x_setup_irqs(struct bnx2x *bp)
 	return 0;
 }
 
+static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_rx_queue_cnic(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
 static void bnx2x_napi_enable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i)
+	for_each_eth_queue(bp, i)
 		napi_enable(&bnx2x_fp(bp, i, napi));
 }
 
+static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_rx_queue_cnic(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
 static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i)
+	for_each_eth_queue(bp, i)
 		napi_disable(&bnx2x_fp(bp, i, napi));
 }
 
@@ -1576,6 +1649,8 @@ void bnx2x_netif_start(struct bnx2x *bp)
 {
 	if (netif_running(bp->dev)) {
 		bnx2x_napi_enable(bp);
+		if (CNIC_LOADED(bp))
+			bnx2x_napi_enable_cnic(bp);
 		bnx2x_int_enable(bp);
 		if (bp->state == BNX2X_STATE_OPEN)
 			netif_tx_wake_all_queues(bp->dev);
@@ -1586,14 +1661,15 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 {
 	bnx2x_int_disable_sync(bp, disable_hw);
 	bnx2x_napi_disable(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_napi_disable_cnic(bp);
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
-#ifdef BCM_CNIC
-	if (!NO_FCOE(bp)) {
+	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
 		struct ethhdr *hdr = (struct ethhdr *)skb->data;
 		u16 ether_type = ntohs(hdr->h_proto);
 
@@ -1609,7 +1685,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
 		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
 			return bnx2x_fcoe_tx(bp, txq_index);
 	}
-#endif
+
 	/* select a non-FCoE queue */
 	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
 }
@@ -1618,15 +1694,15 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
 void bnx2x_set_num_queues(struct bnx2x *bp)
 {
 	/* RSS queues */
-	bp->num_queues = bnx2x_calc_num_queues(bp);
+	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
 
-#ifdef BCM_CNIC
 	/* override in STORAGE SD modes */
 	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
-		bp->num_queues = 1;
-#endif
+		bp->num_ethernet_queues = 1;
+
 	/* Add special queues */
-	bp->num_queues += NON_ETH_CONTEXT_USE;
+	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
+	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
 
 	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
 }
@@ -1653,20 +1729,18 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
  */
-static int bnx2x_set_real_num_queues(struct bnx2x *bp)
+static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
 {
 	int rc, tx, rx;
 
 	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
-	rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
+	rx = BNX2X_NUM_ETH_QUEUES(bp);
 
 /* account for fcoe queue */
-#ifdef BCM_CNIC
-	if (!NO_FCOE(bp)) {
-		rx += FCOE_PRESENT;
-		tx += FCOE_PRESENT;
+	if (include_cnic && !NO_FCOE(bp)) {
+		rx++;
+		tx++;
 	}
-#endif
 
 	rc = netif_set_real_num_tx_queues(bp->dev, tx);
 	if (rc) {
@@ -1859,14 +1933,26 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
 		(bp)->state = BNX2X_STATE_ERROR; \
 		goto label; \
 	} while (0)
-#else
+
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+	do { \
+		bp->cnic_loaded = false; \
+		goto label; \
+	} while (0)
+#else /*BNX2X_STOP_ON_ERROR*/
 #define LOAD_ERROR_EXIT(bp, label) \
 	do { \
 		(bp)->state = BNX2X_STATE_ERROR; \
 		(bp)->panic = 1; \
 		return -EBUSY; \
 	} while (0)
-#endif
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+	do { \
+		bp->cnic_loaded = false; \
+		(bp)->panic = 1; \
+		return -EBUSY; \
+	} while (0)
+#endif /*BNX2X_STOP_ON_ERROR*/
 
 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
 {
@@ -1959,10 +2045,8 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 		fp->max_cos = 1;
 
 	/* Init txdata pointers */
-#ifdef BCM_CNIC
 	if (IS_FCOE_FP(fp))
 		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
-#endif
 	if (IS_ETH_FP(fp))
 		for_each_cos_in_tx_queue(fp, cos)
 			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
@@ -1980,11 +2064,95 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 	else if (bp->flags & GRO_ENABLE_FLAG)
 		fp->mode = TPA_MODE_GRO;
 
-#ifdef BCM_CNIC
 	/* We don't want TPA on an FCoE L2 ring */
 	if (IS_FCOE_FP(fp))
 		fp->disable_tpa = 1;
-#endif
+}
+
+int bnx2x_load_cnic(struct bnx2x *bp)
+{
+	int i, rc, port = BP_PORT(bp);
+
+	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
+
+	mutex_init(&bp->cnic_mutex);
+
+	rc = bnx2x_alloc_mem_cnic(bp);
+	if (rc) {
+		BNX2X_ERR("Unable to allocate bp memory for cnic\n");
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+	}
+
+	rc = bnx2x_alloc_fp_mem_cnic(bp);
+	if (rc) {
+		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+	}
+
+	/* Update the number of queues with the cnic queues */
+	rc = bnx2x_set_real_num_queues(bp, 1);
+	if (rc) {
+		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+	}
+
+	/* Add all CNIC NAPI objects */
+	bnx2x_add_all_napi_cnic(bp);
+	DP(NETIF_MSG_IFUP, "cnic napi added\n");
+	bnx2x_napi_enable_cnic(bp);
+
+	rc = bnx2x_init_hw_func_cnic(bp);
+	if (rc)
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
+
+	bnx2x_nic_init_cnic(bp);
+
+	/* Enable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+
+	for_each_cnic_queue(bp, i) {
+		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+		if (rc) {
+			BNX2X_ERR("Queue setup failed\n");
+			LOAD_ERROR_EXIT(bp, load_error_cnic2);
+		}
+	}
+
+	/* Initialize Rx filter. */
+	netif_addr_lock_bh(bp->dev);
+	bnx2x_set_rx_mode(bp->dev);
+	netif_addr_unlock_bh(bp->dev);
+
+	/* re-read iscsi info */
+	bnx2x_get_iscsi_info(bp);
+	bnx2x_setup_cnic_irq_info(bp);
+	bnx2x_setup_cnic_info(bp);
+	bp->cnic_loaded = true;
+	if (bp->state == BNX2X_STATE_OPEN)
+		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
+
+
+	DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
+
+	return 0;
+
+#ifndef BNX2X_STOP_ON_ERROR
+load_error_cnic2:
+	/* Disable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+
+load_error_cnic1:
+	bnx2x_napi_disable_cnic(bp);
+	/* Update the number of queues without the cnic queues */
+	rc = bnx2x_set_real_num_queues(bp, 0);
+	if (rc)
+		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
+load_error_cnic0:
+	BNX2X_ERR("CNIC-related load failed\n");
+	bnx2x_free_fp_mem_cnic(bp);
+	bnx2x_free_mem_cnic(bp);
+	return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
 }
 
 
@@ -1995,6 +2163,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	u32 load_code;
 	int i, rc;
 
+	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
+	DP(NETIF_MSG_IFUP,
+	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
+
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic)) {
 		BNX2X_ERR("Can't load NIC when there is panic\n");
@@ -2022,9 +2194,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
 	for_each_queue(bp, i)
 		bnx2x_bz_fp(bp, i);
-	memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
-	       sizeof(struct bnx2x_fp_txdata));
+	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
+				  bp->num_cnic_queues) *
+	       sizeof(struct bnx2x_fp_txdata));
 
+	bp->fcoe_init = false;
 
 	/* Set the receive queues buffer size */
 	bnx2x_set_rx_buf_size(bp);
@@ -2034,9 +2208,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	/* As long as bnx2x_alloc_mem() may possibly update
 	 * bp->num_queues, bnx2x_set_real_num_queues() should always
-	 * come after it.
+	 * come after it. At this stage cnic queues are not counted.
 	 */
-	rc = bnx2x_set_real_num_queues(bp);
+	rc = bnx2x_set_real_num_queues(bp, 0);
 	if (rc) {
 		BNX2X_ERR("Unable to set real_num_queues\n");
 		LOAD_ERROR_EXIT(bp, load_error0);
@@ -2050,6 +2224,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	/* Add all NAPI objects */
 	bnx2x_add_all_napi(bp);
+	DP(NETIF_MSG_IFUP, "napi added\n");
 	bnx2x_napi_enable(bp);
 
 	/* set pf load just before approaching the MCP */
@@ -2191,23 +2366,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		LOAD_ERROR_EXIT(bp, load_error3);
 	}
 
-#ifdef BCM_CNIC
-	/* Enable Timer scan */
-	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
-#endif
-
-	for_each_nondefault_queue(bp, i) {
+	for_each_nondefault_eth_queue(bp, i) {
 		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
 		if (rc) {
 			BNX2X_ERR("Queue setup failed\n");
-			LOAD_ERROR_EXIT(bp, load_error4);
+			LOAD_ERROR_EXIT(bp, load_error3);
 		}
 	}
 
 	rc = bnx2x_init_rss_pf(bp);
 	if (rc) {
 		BNX2X_ERR("PF RSS init failed\n");
-		LOAD_ERROR_EXIT(bp, load_error4);
+		LOAD_ERROR_EXIT(bp, load_error3);
 	}
 
 	/* Now when Clients are configured we are ready to work */
@@ -2217,7 +2387,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_set_eth_mac(bp, true);
 	if (rc) {
 		BNX2X_ERR("Setting Ethernet MAC failed\n");
-		LOAD_ERROR_EXIT(bp, load_error4);
+		LOAD_ERROR_EXIT(bp, load_error3);
 	}
 
 	if (bp->pending_max) {
@@ -2264,14 +2434,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/* start the timer */
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
 
-#ifdef BCM_CNIC
-	/* re-read iscsi info */
-	bnx2x_get_iscsi_info(bp);
-	bnx2x_setup_cnic_irq_info(bp);
-	bnx2x_setup_cnic_info(bp);
-	if (bp->state == BNX2X_STATE_OPEN)
-		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
-#endif
+	if (CNIC_ENABLED(bp))
+		bnx2x_load_cnic(bp);
 
 	/* mark driver is loaded in shmem2 */
 	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
@@ -2293,14 +2457,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
 		bnx2x_dcbx_init(bp, false);
 
+	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
+
 	return 0;
 
 #ifndef BNX2X_STOP_ON_ERROR
-load_error4:
-#ifdef BCM_CNIC
-	/* Disable Timer scan */
-	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
-#endif
 load_error3:
 	bnx2x_int_disable_sync(bp, 1);
 
@@ -2338,6 +2499,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	int i;
 	bool global = false;
 
+	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
+
 	/* mark driver is unloaded in shmem2 */
 	if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
 		u32 val;
@@ -2373,14 +2536,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
 	smp_mb();
 
+	if (CNIC_LOADED(bp))
+		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+
 	/* Stop Tx */
 	bnx2x_tx_disable(bp);
 	netdev_reset_tc(bp->dev);
 
-#ifdef BCM_CNIC
-	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
-
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 
 	del_timer_sync(&bp->timer);
@@ -2414,7 +2576,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 		bnx2x_netif_stop(bp, 1);
 		/* Delete all NAPI objects */
 		bnx2x_del_all_napi(bp);
-
+		if (CNIC_LOADED(bp))
+			bnx2x_del_all_napi_cnic(bp);
 		/* Release IRQs */
 		bnx2x_free_irq(bp);
 
@@ -2435,12 +2598,19 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_free_skbs_cnic(bp);
 	for_each_rx_queue(bp, i)
 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 
+	if (CNIC_LOADED(bp)) {
+		bnx2x_free_fp_mem_cnic(bp);
+		bnx2x_free_mem_cnic(bp);
+	}
 	bnx2x_free_mem(bp);
 
 	bp->state = BNX2X_STATE_CLOSED;
+	bp->cnic_loaded = false;
 
 	/* Check if there are pending parity attentions. If there are - set
 	 * RECOVERY_IN_PROGRESS.
@@ -2460,6 +2630,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
 		bnx2x_disable_close_the_gate(bp);
 
+	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
+
 	return 0;
 }
 
@@ -2550,7 +2722,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 
 		/* Fall out from the NAPI loop if needed */
 		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
-#ifdef BCM_CNIC
+
 			/* No need to update SB for FCoE L2 ring as long as
 			 * it's connected to the default SB and the SB
 			 * has been updated when NAPI was scheduled.
@@ -2559,8 +2731,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 				napi_complete(napi);
 				break;
 			}
-#endif
-
 			bnx2x_update_fpsb_idx(fp);
 			/* bnx2x_has_rx_work() reads the status block,
 			 * thus we need to ensure that status block indices
@@ -2940,7 +3110,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	txq_index = skb_get_queue_mapping(skb);
 	txq = netdev_get_tx_queue(dev, txq_index);
 
-	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
+	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
 
 	txdata = &bp->bnx2x_txq[txq_index];
 
@@ -3339,13 +3509,11 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 		return -EINVAL;
 	}
 
-#ifdef BCM_CNIC
 	if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
 	    !is_zero_ether_addr(addr->sa_data)) {
 		BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
 		return -EINVAL;
 	}
-#endif
 
 	if (netif_running(dev)) {
 		rc = bnx2x_set_eth_mac(bp, false);
@@ -3369,13 +3537,11 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
 	u8 cos;
 
 	/* Common */
-#ifdef BCM_CNIC
+
 	if (IS_FCOE_IDX(fp_index)) {
 		memset(sb, 0, sizeof(union host_hc_status_block));
 		fp->status_blk_mapping = 0;
-
 	} else {
-#endif
 		/* status blocks */
 		if (!CHIP_IS_E1x(bp))
 			BNX2X_PCI_FREE(sb->e2_sb,
@@ -3387,9 +3553,8 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
 				       bnx2x_fp(bp, fp_index,
 						status_blk_mapping),
 				       sizeof(struct host_hc_status_block_e1x));
-#ifdef BCM_CNIC
 	}
-#endif
+
 	/* Rx */
 	if (!skip_rx_queue(bp, fp_index)) {
 		bnx2x_free_rx_bds(fp);
@@ -3431,10 +3596,17 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
 	/* end of fastpath */
 }
 
+void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
+{
+	int i;
+	for_each_cnic_queue(bp, i)
+		bnx2x_free_fp_mem_at(bp, i);
+}
+
 void bnx2x_free_fp_mem(struct bnx2x *bp)
 {
 	int i;
-	for_each_queue(bp, i)
+	for_each_eth_queue(bp, i)
 		bnx2x_free_fp_mem_at(bp, i);
 }
 
@@ -3519,14 +3691,11 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 	u8 cos;
 	int rx_ring_size = 0;
 
-#ifdef BCM_CNIC
 	if (!bp->rx_ring_size &&
 	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
 		rx_ring_size = MIN_RX_SIZE_NONTPA;
 		bp->rx_ring_size = rx_ring_size;
-	} else
-#endif
-	if (!bp->rx_ring_size) {
+	} else if (!bp->rx_ring_size) {
 		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
 
 		if (CHIP_IS_E3(bp)) {
@@ -3550,9 +3719,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 
 	/* Common */
 	sb = &bnx2x_fp(bp, index, status_blk);
-#ifdef BCM_CNIC
+
 	if (!IS_FCOE_IDX(index)) {
-#endif
 		/* status blocks */
 		if (!CHIP_IS_E1x(bp))
 			BNX2X_PCI_ALLOC(sb->e2_sb,
@@ -3562,9 +3730,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 			BNX2X_PCI_ALLOC(sb->e1x_sb,
 					&bnx2x_fp(bp, index, status_blk_mapping),
 					sizeof(struct host_hc_status_block_e1x));
-#ifdef BCM_CNIC
 	}
-#endif
 
 	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
 	 * set shortcuts for it.
@@ -3641,31 +3807,31 @@ alloc_mem_err:
 	return 0;
 }
 
+int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
+{
+	if (!NO_FCOE(bp))
+		/* FCoE */
+		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
+			/* we will fail load process instead of mark
+			 * NO_FCOE_FLAG
+			 */
+			return -ENOMEM;
+
+	return 0;
+}
+
 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
 {
 	int i;
 
-	/**
-	 * 1. Allocate FP for leading - fatal if error
-	 * 2. {CNIC} Allocate FCoE FP - fatal if error
-	 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
-	 * 4. Allocate RSS - fix number of queues if error
+	/* 1. Allocate FP for leading - fatal if error
+	 * 2. Allocate RSS - fix number of queues if error
 	 */
 
 	/* leading */
 	if (bnx2x_alloc_fp_mem_at(bp, 0))
 		return -ENOMEM;
 
-#ifdef BCM_CNIC
-	if (!NO_FCOE(bp))
-		/* FCoE */
-		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
-			/* we will fail load process instead of mark
-			 * NO_FCOE_FLAG
-			 */
-			return -ENOMEM;
-#endif
-
 	/* RSS */
 	for_each_nondefault_eth_queue(bp, i)
 		if (bnx2x_alloc_fp_mem_at(bp, i))
@@ -3676,17 +3842,17 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
 		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
 
 		WARN_ON(delta < 0);
-#ifdef BCM_CNIC
-		/**
-		 * move non eth FPs next to last eth FP
-		 * must be done in that order
-		 * FCOE_IDX < FWD_IDX < OOO_IDX
-		 */
+		if (CNIC_SUPPORT(bp))
+			/* move non eth FPs next to last eth FP
+			 * must be done in that order
+			 * FCOE_IDX < FWD_IDX < OOO_IDX
+			 */
 
-		/* move FCoE fp even NO_FCOE_FLAG is on */
-		bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
-#endif
-		bp->num_queues -= delta;
+			/* move FCoE fp even NO_FCOE_FLAG is on */
+			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
+		bp->num_ethernet_queues -= delta;
+		bp->num_queues = bp->num_ethernet_queues +
+				 bp->num_cnic_queues;
 		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
 			  bp->num_queues + delta, bp->num_queues);
 	}
@@ -3711,7 +3877,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
 	struct msix_entry *tbl;
 	struct bnx2x_ilt *ilt;
 	int msix_table_size = 0;
-	int fp_array_size;
+	int fp_array_size, txq_array_size;
 	int i;
 
 	/*
@@ -3721,7 +3887,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
 	msix_table_size = bp->igu_sb_cnt + 1;
 
 	/* fp array: RSS plus CNIC related L2 queues */
-	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
+	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
 	BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
 
 	fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
@@ -3750,12 +3916,12 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
 		goto alloc_err;
 
 	/* Allocate memory for the transmission queues array */
-	bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
-#ifdef BCM_CNIC
-	bp->bnx2x_txq_size++;
-#endif
-	bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
-				sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
+	txq_array_size =
+		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
+	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
+
+	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
+				GFP_KERNEL);
 	if (!bp->bnx2x_txq)
 		goto alloc_err;
 
@@ -3838,7 +4004,7 @@ int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
 	return LINK_CONFIG_IDX(sel_phy_idx);
 }
 
-#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+#ifdef NETDEV_FCOE_WWNN
 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 {
 	struct bnx2x *bp = netdev_priv(dev);