Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  563
1 file changed, 388 insertions(+), 175 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4833b6a9031c..f771ddfba646 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -80,12 +80,37 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET; 80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
81 } 81 }
82 82
83 memcpy(&bp->bnx2x_txq[old_txdata_index], 83 memcpy(&bp->bnx2x_txq[new_txdata_index],
84 &bp->bnx2x_txq[new_txdata_index], 84 &bp->bnx2x_txq[old_txdata_index],
85 sizeof(struct bnx2x_fp_txdata)); 85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index]; 86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
87} 87}
88 88
89/**
90 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
91 *
92 * @bp: driver handle
93 * @delta: number of eth queues which were not allocated
94 */
95static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
96{
97 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
98
 99	/* Queue pointer cannot be re-set on a per-fp basis, as moving a pointer
 100	 * backward along the array could cause memory to be overwritten
101 */
102 for (cos = 1; cos < bp->max_cos; cos++) {
103 for (i = 0; i < old_eth_num - delta; i++) {
104 struct bnx2x_fastpath *fp = &bp->fp[i];
105 int new_idx = cos * (old_eth_num - delta) + i;
106
107 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
108 sizeof(struct bnx2x_fp_txdata));
109 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
110 }
111 }
112}
113
89int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ 114int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
90 115
91/* free skb in the packet ring at pos idx 116/* free skb in the packet ring at pos idx
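A worked view of the compaction in bnx2x_shrink_eth_fp() above, with hypothetical queue counts (not taken from the patch):

    /* Hypothetical shrink: 8 eth queues were planned, 6 remain (delta = 2).
     * The txdata slot of (cos, queue) is cos * num_eth + queue, so:
     *
     *   (cos 1, q 0): old slot 1*8 + 0 = 8   ->  new slot 1*6 + 0 = 6
     *   (cos 1, q 5): old slot 1*8 + 5 = 13  ->  new slot 1*6 + 5 = 11
     *   (cos 2, q 0): old slot 2*8 + 0 = 16  ->  new slot 2*6 + 0 = 12
     *
     * Every destination slot is <= its source slot and all still-unread
     * sources lie further right, so the ascending cos/i walk never
     * overwrites an entry before it has been copied.
     */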
@@ -552,6 +577,23 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
552 return 0; 577 return 0;
553} 578}
554 579
580static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
581{
582 if (fp->rx_frag_size)
583 put_page(virt_to_head_page(data));
584 else
585 kfree(data);
586}
587
588static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
589{
590 if (fp->rx_frag_size)
591 return netdev_alloc_frag(fp->rx_frag_size);
592
593 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
594}
595
596
555static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, 597static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
556 struct bnx2x_agg_info *tpa_info, 598 struct bnx2x_agg_info *tpa_info,
557 u16 pages, 599 u16 pages,
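For reference, a minimal sketch of how the two helpers above pair with build_skb() in the callers that follow (identifiers from the patch, control flow simplified):

    void *data = bnx2x_frag_alloc(fp);      /* page frag or kmalloc'ed head */
    struct sk_buff *skb;

    if (unlikely(!data))
        return -ENOMEM;
    /* A non-zero rx_frag_size tells build_skb() the head is a page
     * fragment (released with put_page()); 0 means a kmalloc'ed head
     * (released with kfree()), matching bnx2x_frag_free() above.
     */
    skb = build_skb(data, fp->rx_frag_size);
    if (unlikely(!skb))
        bnx2x_frag_free(fp, data);          /* must match the allocator */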
@@ -574,15 +616,14 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
574 goto drop; 616 goto drop;
575 617
576 /* Try to allocate the new data */ 618 /* Try to allocate the new data */
577 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); 619 new_data = bnx2x_frag_alloc(fp);
578
579 /* Unmap skb in the pool anyway, as we are going to change 620 /* Unmap skb in the pool anyway, as we are going to change
580 pool entry status to BNX2X_TPA_STOP even if new skb allocation 621 pool entry status to BNX2X_TPA_STOP even if new skb allocation
581 fails. */ 622 fails. */
582 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), 623 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
583 fp->rx_buf_size, DMA_FROM_DEVICE); 624 fp->rx_buf_size, DMA_FROM_DEVICE);
584 if (likely(new_data)) 625 if (likely(new_data))
585 skb = build_skb(data, 0); 626 skb = build_skb(data, fp->rx_frag_size);
586 627
587 if (likely(skb)) { 628 if (likely(skb)) {
588#ifdef BNX2X_STOP_ON_ERROR 629#ifdef BNX2X_STOP_ON_ERROR
@@ -619,7 +660,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
619 660
620 return; 661 return;
621 } 662 }
622 kfree(new_data); 663 bnx2x_frag_free(fp, new_data);
623drop: 664drop:
624 /* drop the packet and keep the buffer in the bin */ 665 /* drop the packet and keep the buffer in the bin */
625 DP(NETIF_MSG_RX_STATUS, 666 DP(NETIF_MSG_RX_STATUS,
@@ -635,7 +676,7 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
635 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; 676 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
636 dma_addr_t mapping; 677 dma_addr_t mapping;
637 678
638 data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); 679 data = bnx2x_frag_alloc(fp);
639 if (unlikely(data == NULL)) 680 if (unlikely(data == NULL))
640 return -ENOMEM; 681 return -ENOMEM;
641 682
@@ -643,7 +684,7 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
643 fp->rx_buf_size, 684 fp->rx_buf_size,
644 DMA_FROM_DEVICE); 685 DMA_FROM_DEVICE);
645 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 686 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
646 kfree(data); 687 bnx2x_frag_free(fp, data);
647 BNX2X_ERR("Can't map rx data\n"); 688 BNX2X_ERR("Can't map rx data\n");
648 return -ENOMEM; 689 return -ENOMEM;
649 } 690 }
@@ -845,9 +886,9 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
845 dma_unmap_addr(rx_buf, mapping), 886 dma_unmap_addr(rx_buf, mapping),
846 fp->rx_buf_size, 887 fp->rx_buf_size,
847 DMA_FROM_DEVICE); 888 DMA_FROM_DEVICE);
848 skb = build_skb(data, 0); 889 skb = build_skb(data, fp->rx_frag_size);
849 if (unlikely(!skb)) { 890 if (unlikely(!skb)) {
850 kfree(data); 891 bnx2x_frag_free(fp, data);
851 bnx2x_fp_qstats(bp, fp)-> 892 bnx2x_fp_qstats(bp, fp)->
852 rx_skb_alloc_failed++; 893 rx_skb_alloc_failed++;
853 goto next_rx; 894 goto next_rx;
@@ -948,14 +989,12 @@ void bnx2x_acquire_phy_lock(struct bnx2x *bp)
948{ 989{
949 mutex_lock(&bp->port.phy_mutex); 990 mutex_lock(&bp->port.phy_mutex);
950 991
951 if (bp->port.need_hw_lock) 992 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
952 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
953} 993}
954 994
955void bnx2x_release_phy_lock(struct bnx2x *bp) 995void bnx2x_release_phy_lock(struct bnx2x *bp)
956{ 996{
957 if (bp->port.need_hw_lock) 997 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
958 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
959 998
960 mutex_unlock(&bp->port.phy_mutex); 999 mutex_unlock(&bp->port.phy_mutex);
961} 1000}
@@ -1147,11 +1186,30 @@ static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1147 dma_unmap_single(&bp->pdev->dev, 1186 dma_unmap_single(&bp->pdev->dev,
1148 dma_unmap_addr(first_buf, mapping), 1187 dma_unmap_addr(first_buf, mapping),
1149 fp->rx_buf_size, DMA_FROM_DEVICE); 1188 fp->rx_buf_size, DMA_FROM_DEVICE);
1150 kfree(data); 1189 bnx2x_frag_free(fp, data);
1151 first_buf->data = NULL; 1190 first_buf->data = NULL;
1152 } 1191 }
1153} 1192}
1154 1193
1194void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1195{
1196 int j;
1197
1198 for_each_rx_queue_cnic(bp, j) {
1199 struct bnx2x_fastpath *fp = &bp->fp[j];
1200
1201 fp->rx_bd_cons = 0;
1202
1203 /* Activate BD ring */
1204 /* Warning!
 1205	 * this will generate an interrupt (to the TSTORM);
 1206	 * it must only be done after the chip is initialized
1207 */
1208 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1209 fp->rx_sge_prod);
1210 }
1211}
1212
1155void bnx2x_init_rx_rings(struct bnx2x *bp) 1213void bnx2x_init_rx_rings(struct bnx2x *bp)
1156{ 1214{
1157 int func = BP_FUNC(bp); 1215 int func = BP_FUNC(bp);
@@ -1159,7 +1217,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1159 int i, j; 1217 int i, j;
1160 1218
1161 /* Allocate TPA resources */ 1219 /* Allocate TPA resources */
1162 for_each_rx_queue(bp, j) { 1220 for_each_eth_queue(bp, j) {
1163 struct bnx2x_fastpath *fp = &bp->fp[j]; 1221 struct bnx2x_fastpath *fp = &bp->fp[j];
1164 1222
1165 DP(NETIF_MSG_IFUP, 1223 DP(NETIF_MSG_IFUP,
@@ -1173,8 +1231,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1173 struct sw_rx_bd *first_buf = 1231 struct sw_rx_bd *first_buf =
1174 &tpa_info->first_buf; 1232 &tpa_info->first_buf;
1175 1233
1176 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, 1234 first_buf->data = bnx2x_frag_alloc(fp);
1177 GFP_ATOMIC);
1178 if (!first_buf->data) { 1235 if (!first_buf->data) {
1179 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n", 1236 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1180 j); 1237 j);
@@ -1217,7 +1274,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1217 } 1274 }
1218 } 1275 }
1219 1276
1220 for_each_rx_queue(bp, j) { 1277 for_each_eth_queue(bp, j) {
1221 struct bnx2x_fastpath *fp = &bp->fp[j]; 1278 struct bnx2x_fastpath *fp = &bp->fp[j];
1222 1279
1223 fp->rx_bd_cons = 0; 1280 fp->rx_bd_cons = 0;
@@ -1244,29 +1301,45 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1244 } 1301 }
1245} 1302}
1246 1303
1247static void bnx2x_free_tx_skbs(struct bnx2x *bp) 1304static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1248{ 1305{
1249 int i;
1250 u8 cos; 1306 u8 cos;
1307 struct bnx2x *bp = fp->bp;
1251 1308
1252 for_each_tx_queue(bp, i) { 1309 for_each_cos_in_tx_queue(fp, cos) {
1253 struct bnx2x_fastpath *fp = &bp->fp[i]; 1310 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1254 for_each_cos_in_tx_queue(fp, cos) { 1311 unsigned pkts_compl = 0, bytes_compl = 0;
1255 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1256 unsigned pkts_compl = 0, bytes_compl = 0;
1257 1312
1258 u16 sw_prod = txdata->tx_pkt_prod; 1313 u16 sw_prod = txdata->tx_pkt_prod;
1259 u16 sw_cons = txdata->tx_pkt_cons; 1314 u16 sw_cons = txdata->tx_pkt_cons;
1260 1315
1261 while (sw_cons != sw_prod) { 1316 while (sw_cons != sw_prod) {
1262 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), 1317 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1263 &pkts_compl, &bytes_compl); 1318 &pkts_compl, &bytes_compl);
1264 sw_cons++; 1319 sw_cons++;
1265 }
1266 netdev_tx_reset_queue(
1267 netdev_get_tx_queue(bp->dev,
1268 txdata->txq_index));
1269 } 1320 }
1321
1322 netdev_tx_reset_queue(
1323 netdev_get_tx_queue(bp->dev,
1324 txdata->txq_index));
1325 }
1326}
1327
1328static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1329{
1330 int i;
1331
1332 for_each_tx_queue_cnic(bp, i) {
1333 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1334 }
1335}
1336
1337static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1338{
1339 int i;
1340
1341 for_each_eth_queue(bp, i) {
1342 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1270 } 1343 }
1271} 1344}
1272 1345
@@ -1290,7 +1363,16 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1290 fp->rx_buf_size, DMA_FROM_DEVICE); 1363 fp->rx_buf_size, DMA_FROM_DEVICE);
1291 1364
1292 rx_buf->data = NULL; 1365 rx_buf->data = NULL;
1293 kfree(data); 1366 bnx2x_frag_free(fp, data);
1367 }
1368}
1369
1370static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1371{
1372 int j;
1373
1374 for_each_rx_queue_cnic(bp, j) {
1375 bnx2x_free_rx_bds(&bp->fp[j]);
1294 } 1376 }
1295} 1377}
1296 1378
@@ -1298,7 +1380,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1298{ 1380{
1299 int j; 1381 int j;
1300 1382
1301 for_each_rx_queue(bp, j) { 1383 for_each_eth_queue(bp, j) {
1302 struct bnx2x_fastpath *fp = &bp->fp[j]; 1384 struct bnx2x_fastpath *fp = &bp->fp[j];
1303 1385
1304 bnx2x_free_rx_bds(fp); 1386 bnx2x_free_rx_bds(fp);
@@ -1308,6 +1390,12 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1308 } 1390 }
1309} 1391}
1310 1392
1393void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1394{
1395 bnx2x_free_tx_skbs_cnic(bp);
1396 bnx2x_free_rx_skbs_cnic(bp);
1397}
1398
1311void bnx2x_free_skbs(struct bnx2x *bp) 1399void bnx2x_free_skbs(struct bnx2x *bp)
1312{ 1400{
1313 bnx2x_free_tx_skbs(bp); 1401 bnx2x_free_tx_skbs(bp);
@@ -1347,11 +1435,12 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1347 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", 1435 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1348 bp->msix_table[offset].vector); 1436 bp->msix_table[offset].vector);
1349 offset++; 1437 offset++;
1350#ifdef BCM_CNIC 1438
1351 if (nvecs == offset) 1439 if (CNIC_SUPPORT(bp)) {
1352 return; 1440 if (nvecs == offset)
1353 offset++; 1441 return;
1354#endif 1442 offset++;
1443 }
1355 1444
1356 for_each_eth_queue(bp, i) { 1445 for_each_eth_queue(bp, i) {
1357 if (nvecs == offset) 1446 if (nvecs == offset)
@@ -1368,7 +1457,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
1368 if (bp->flags & USING_MSIX_FLAG && 1457 if (bp->flags & USING_MSIX_FLAG &&
1369 !(bp->flags & USING_SINGLE_MSIX_FLAG)) 1458 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1370 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + 1459 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1371 CNIC_PRESENT + 1); 1460 CNIC_SUPPORT(bp) + 1);
1372 else 1461 else
1373 free_irq(bp->dev->irq, bp->dev); 1462 free_irq(bp->dev->irq, bp->dev);
1374} 1463}
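With the ifdefs gone, both IRQ paths above assume a single MSI-X table layout; a sketch with a hypothetical eth queue count:

    /* Hypothetical layout for 8 eth queues on a CNIC-capable device:
     *
     *   msix_table[0]        slowpath (sp)
     *   msix_table[1]        CNIC           (present iff CNIC_SUPPORT(bp))
     *   msix_table[2..9]     fp[0]..fp[7]   (one per eth queue)
     *
     * so nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1 and the
     * eth vectors start at offset 1 + CNIC_SUPPORT(bp).
     */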
@@ -1382,12 +1471,14 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1382 bp->msix_table[0].entry); 1471 bp->msix_table[0].entry);
1383 msix_vec++; 1472 msix_vec++;
1384 1473
1385#ifdef BCM_CNIC 1474 /* CNIC requires an MSI-X vector for itself */
1386 bp->msix_table[msix_vec].entry = msix_vec; 1475 if (CNIC_SUPPORT(bp)) {
1387 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n", 1476 bp->msix_table[msix_vec].entry = msix_vec;
1388 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); 1477 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1389 msix_vec++; 1478 msix_vec, bp->msix_table[msix_vec].entry);
1390#endif 1479 msix_vec++;
1480 }
1481
1391 /* We need separate vectors for ETH queues only (not FCoE) */ 1482 /* We need separate vectors for ETH queues only (not FCoE) */
1392 for_each_eth_queue(bp, i) { 1483 for_each_eth_queue(bp, i) {
1393 bp->msix_table[msix_vec].entry = msix_vec; 1484 bp->msix_table[msix_vec].entry = msix_vec;
@@ -1396,7 +1487,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1396 msix_vec++; 1487 msix_vec++;
1397 } 1488 }
1398 1489
1399 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1; 1490 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
1400 1491
1401 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); 1492 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1402 1493
@@ -1404,7 +1495,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1404 * reconfigure number of tx/rx queues according to available 1495 * reconfigure number of tx/rx queues according to available
1405 * MSI-X vectors 1496 * MSI-X vectors
1406 */ 1497 */
1407 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { 1498 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1408 /* how many fewer vectors will we have? */ 1499
1409 int diff = req_cnt - rc; 1500 int diff = req_cnt - rc;
1410 1501
@@ -1419,7 +1510,8 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1419 /* 1510 /*
1420 * decrease number of queues by number of unallocated entries 1511 * decrease number of queues by number of unallocated entries
1421 */ 1512 */
1422 bp->num_queues -= diff; 1513 bp->num_ethernet_queues -= diff;
1514 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1423 1515
1424 BNX2X_DEV_INFO("New queue configuration set: %d\n", 1516 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1425 bp->num_queues); 1517 bp->num_queues);
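A worked shortfall under the same hypothetical setup; pci_enable_msix() returning a positive rc reports how many vectors are actually available:

    /* req_cnt = 8 eth + 1 cnic + 1 sp = 10; suppose rc = 7.
     * diff = 10 - 7 = 3, so:
     *   bp->num_ethernet_queues: 8 - 3 = 5
     *   bp->num_queues:          5 + bp->num_cnic_queues = 6
     * Only the eth queues shrink; the sp and cnic vectors keep their slots.
     */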
@@ -1435,6 +1527,9 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1435 BNX2X_DEV_INFO("Using single MSI-X vector\n"); 1527 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1436 bp->flags |= USING_SINGLE_MSIX_FLAG; 1528 bp->flags |= USING_SINGLE_MSIX_FLAG;
1437 1529
1530 BNX2X_DEV_INFO("set number of queues to 1\n");
1531 bp->num_ethernet_queues = 1;
1532 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1438 } else if (rc < 0) { 1533 } else if (rc < 0) {
1439 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); 1534 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1440 goto no_msix; 1535 goto no_msix;
@@ -1464,9 +1559,9 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1464 return -EBUSY; 1559 return -EBUSY;
1465 } 1560 }
1466 1561
1467#ifdef BCM_CNIC 1562 if (CNIC_SUPPORT(bp))
1468 offset++; 1563 offset++;
1469#endif 1564
1470 for_each_eth_queue(bp, i) { 1565 for_each_eth_queue(bp, i) {
1471 struct bnx2x_fastpath *fp = &bp->fp[i]; 1566 struct bnx2x_fastpath *fp = &bp->fp[i];
1472 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", 1567 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -1485,7 +1580,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1485 } 1580 }
1486 1581
1487 i = BNX2X_NUM_ETH_QUEUES(bp); 1582 i = BNX2X_NUM_ETH_QUEUES(bp);
1488 offset = 1 + CNIC_PRESENT; 1583 offset = 1 + CNIC_SUPPORT(bp);
1489 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n", 1584 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1490 bp->msix_table[0].vector, 1585 bp->msix_table[0].vector,
1491 0, bp->msix_table[offset].vector, 1586 0, bp->msix_table[offset].vector,
@@ -1556,19 +1651,35 @@ static int bnx2x_setup_irqs(struct bnx2x *bp)
1556 return 0; 1651 return 0;
1557} 1652}
1558 1653
1654static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1655{
1656 int i;
1657
1658 for_each_rx_queue_cnic(bp, i)
1659 napi_enable(&bnx2x_fp(bp, i, napi));
1660}
1661
1559static void bnx2x_napi_enable(struct bnx2x *bp) 1662static void bnx2x_napi_enable(struct bnx2x *bp)
1560{ 1663{
1561 int i; 1664 int i;
1562 1665
1563 for_each_rx_queue(bp, i) 1666 for_each_eth_queue(bp, i)
1564 napi_enable(&bnx2x_fp(bp, i, napi)); 1667 napi_enable(&bnx2x_fp(bp, i, napi));
1565} 1668}
1566 1669
1670static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1671{
1672 int i;
1673
1674 for_each_rx_queue_cnic(bp, i)
1675 napi_disable(&bnx2x_fp(bp, i, napi));
1676}
1677
1567static void bnx2x_napi_disable(struct bnx2x *bp) 1678static void bnx2x_napi_disable(struct bnx2x *bp)
1568{ 1679{
1569 int i; 1680 int i;
1570 1681
1571 for_each_rx_queue(bp, i) 1682 for_each_eth_queue(bp, i)
1572 napi_disable(&bnx2x_fp(bp, i, napi)); 1683 napi_disable(&bnx2x_fp(bp, i, napi));
1573} 1684}
1574 1685
@@ -1576,6 +1687,8 @@ void bnx2x_netif_start(struct bnx2x *bp)
1576{ 1687{
1577 if (netif_running(bp->dev)) { 1688 if (netif_running(bp->dev)) {
1578 bnx2x_napi_enable(bp); 1689 bnx2x_napi_enable(bp);
1690 if (CNIC_LOADED(bp))
1691 bnx2x_napi_enable_cnic(bp);
1579 bnx2x_int_enable(bp); 1692 bnx2x_int_enable(bp);
1580 if (bp->state == BNX2X_STATE_OPEN) 1693 if (bp->state == BNX2X_STATE_OPEN)
1581 netif_tx_wake_all_queues(bp->dev); 1694 netif_tx_wake_all_queues(bp->dev);
@@ -1586,14 +1699,15 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1586{ 1699{
1587 bnx2x_int_disable_sync(bp, disable_hw); 1700 bnx2x_int_disable_sync(bp, disable_hw);
1588 bnx2x_napi_disable(bp); 1701 bnx2x_napi_disable(bp);
1702 if (CNIC_LOADED(bp))
1703 bnx2x_napi_disable_cnic(bp);
1589} 1704}
1590 1705
1591u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) 1706u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1592{ 1707{
1593 struct bnx2x *bp = netdev_priv(dev); 1708 struct bnx2x *bp = netdev_priv(dev);
1594 1709
1595#ifdef BCM_CNIC 1710 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1596 if (!NO_FCOE(bp)) {
1597 struct ethhdr *hdr = (struct ethhdr *)skb->data; 1711 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1598 u16 ether_type = ntohs(hdr->h_proto); 1712 u16 ether_type = ntohs(hdr->h_proto);
1599 1713
@@ -1609,7 +1723,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1609 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) 1723 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1610 return bnx2x_fcoe_tx(bp, txq_index); 1724 return bnx2x_fcoe_tx(bp, txq_index);
1611 } 1725 }
1612#endif 1726
1613 /* select a non-FCoE queue */ 1727 /* select a non-FCoE queue */
1614 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); 1728 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1615} 1729}
@@ -1618,15 +1732,15 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1618void bnx2x_set_num_queues(struct bnx2x *bp) 1732void bnx2x_set_num_queues(struct bnx2x *bp)
1619{ 1733{
1620 /* RSS queues */ 1734 /* RSS queues */
1621 bp->num_queues = bnx2x_calc_num_queues(bp); 1735 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1622 1736
1623#ifdef BCM_CNIC
1624 /* override in STORAGE SD modes */ 1737 /* override in STORAGE SD modes */
1625 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) 1738 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1626 bp->num_queues = 1; 1739 bp->num_ethernet_queues = 1;
1627#endif 1740
1628 /* Add special queues */ 1741 /* Add special queues */
1629 bp->num_queues += NON_ETH_CONTEXT_USE; 1742 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1743 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1630 1744
1631 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); 1745 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1632} 1746}
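The split counters keep the invariant num_queues = num_ethernet_queues + num_cnic_queues; with hypothetical numbers:

    /* bnx2x_calc_num_queues() -> 8 RSS queues (hypothetical):
     *   num_ethernet_queues = 8, num_cnic_queues = CNIC_SUPPORT(bp) = 1,
     *   num_queues = 9.
     * In the STORAGE SD / FCoE AFEX override, eth is forced to 1, giving
     * num_queues = 2.
     */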
@@ -1653,20 +1767,18 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
1653 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() 1767 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1654 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). 1768 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1655 */ 1769 */
1656static int bnx2x_set_real_num_queues(struct bnx2x *bp) 1770static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1657{ 1771{
1658 int rc, tx, rx; 1772 int rc, tx, rx;
1659 1773
1660 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; 1774 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1661 rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE; 1775 rx = BNX2X_NUM_ETH_QUEUES(bp);
1662 1776
1663/* account for fcoe queue */ 1777/* account for fcoe queue */
1664#ifdef BCM_CNIC 1778 if (include_cnic && !NO_FCOE(bp)) {
1665 if (!NO_FCOE(bp)) { 1779 rx++;
1666 rx += FCOE_PRESENT; 1780 tx++;
1667 tx += FCOE_PRESENT;
1668 } 1781 }
1669#endif
1670 1782
1671 rc = netif_set_real_num_tx_queues(bp->dev, tx); 1783 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1672 if (rc) { 1784 if (rc) {
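A worked pass through the new queue math, again with hypothetical counts:

    /* 8 eth queues, max_cos = 3:
     *   tx = 8 * 3 = 24, rx = 8      (include_cnic == 0, initial load)
     *   tx = 25,         rx = 9      (include_cnic != 0 and FCoE present)
     * The FCoE L2 ring contributes exactly one extra tx and one extra rx
     * queue, added only once CNIC is loaded.
     */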
@@ -1710,6 +1822,10 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1710 mtu + 1822 mtu +
1711 BNX2X_FW_RX_ALIGN_END; 1823 BNX2X_FW_RX_ALIGN_END;
1712 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */ 1824
1825 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1826 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1827 else
1828 fp->rx_frag_size = 0;
1713 } 1829 }
1714} 1830}
1715 1831
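Rough numbers for the new rx_frag_size gate (illustrative approximations, not exact driver constants):

    /* 4 KiB pages, 1500-byte MTU: rx_buf_size (FW alignment + padding +
     * mtu, on the order of 1.7 KiB) plus NET_SKB_PAD fits in PAGE_SIZE,
     * so rx buffers come from the page-frag allocator.
     * 9000-byte jumbo MTU: the sum exceeds PAGE_SIZE, rx_frag_size = 0,
     * and every bnx2x_frag_alloc() caller falls back to kmalloc().
     */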
@@ -1741,7 +1857,6 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1741 bool config_hash) 1857 bool config_hash)
1742{ 1858{
1743 struct bnx2x_config_rss_params params = {NULL}; 1859 struct bnx2x_config_rss_params params = {NULL};
1744 int i;
1745 1860
1746 /* Although RSS is meaningless when there is a single HW queue we 1861 /* Although RSS is meaningless when there is a single HW queue we
1747 * still need it enabled in order to have HW Rx hash generated. 1862 * still need it enabled in order to have HW Rx hash generated.
@@ -1773,9 +1888,7 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1773 1888
1774 if (config_hash) { 1889 if (config_hash) {
1775 /* RSS keys */ 1890 /* RSS keys */
1776 for (i = 0; i < sizeof(params.rss_key) / 4; i++) 1891 prandom_bytes(params.rss_key, sizeof(params.rss_key));
1777 params.rss_key[i] = random32();
1778
1779 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags); 1892 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1780 } 1893 }
1781 1894
@@ -1859,14 +1972,26 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
1859 (bp)->state = BNX2X_STATE_ERROR; \ 1972 (bp)->state = BNX2X_STATE_ERROR; \
1860 goto label; \ 1973 goto label; \
1861 } while (0) 1974 } while (0)
1862#else 1975
1976#define LOAD_ERROR_EXIT_CNIC(bp, label) \
1977 do { \
1978 bp->cnic_loaded = false; \
1979 goto label; \
1980 } while (0)
1981#else /*BNX2X_STOP_ON_ERROR*/
1863#define LOAD_ERROR_EXIT(bp, label) \ 1982#define LOAD_ERROR_EXIT(bp, label) \
1864 do { \ 1983 do { \
1865 (bp)->state = BNX2X_STATE_ERROR; \ 1984 (bp)->state = BNX2X_STATE_ERROR; \
1866 (bp)->panic = 1; \ 1985 (bp)->panic = 1; \
1867 return -EBUSY; \ 1986 return -EBUSY; \
1868 } while (0) 1987 } while (0)
1869#endif 1988#define LOAD_ERROR_EXIT_CNIC(bp, label) \
1989 do { \
1990 bp->cnic_loaded = false; \
1991 (bp)->panic = 1; \
1992 return -EBUSY; \
1993 } while (0)
1994#endif /*BNX2X_STOP_ON_ERROR*/
1870 1995
1871bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err) 1996bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1872{ 1997{
@@ -1959,10 +2084,8 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1959 fp->max_cos = 1; 2084 fp->max_cos = 1;
1960 2085
1961 /* Init txdata pointers */ 2086 /* Init txdata pointers */
1962#ifdef BCM_CNIC
1963 if (IS_FCOE_FP(fp)) 2087 if (IS_FCOE_FP(fp))
1964 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; 2088 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
1965#endif
1966 if (IS_ETH_FP(fp)) 2089 if (IS_ETH_FP(fp))
1967 for_each_cos_in_tx_queue(fp, cos) 2090 for_each_cos_in_tx_queue(fp, cos)
1968 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * 2091 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
@@ -1980,11 +2103,95 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1980 else if (bp->flags & GRO_ENABLE_FLAG) 2103 else if (bp->flags & GRO_ENABLE_FLAG)
1981 fp->mode = TPA_MODE_GRO; 2104 fp->mode = TPA_MODE_GRO;
1982 2105
1983#ifdef BCM_CNIC
1984 /* We don't want TPA on an FCoE L2 ring */ 2106 /* We don't want TPA on an FCoE L2 ring */
1985 if (IS_FCOE_FP(fp)) 2107 if (IS_FCOE_FP(fp))
1986 fp->disable_tpa = 1; 2108 fp->disable_tpa = 1;
1987#endif 2109}
2110
2111int bnx2x_load_cnic(struct bnx2x *bp)
2112{
2113 int i, rc, port = BP_PORT(bp);
2114
2115 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2116
2117 mutex_init(&bp->cnic_mutex);
2118
2119 rc = bnx2x_alloc_mem_cnic(bp);
2120 if (rc) {
2121 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2122 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2123 }
2124
2125 rc = bnx2x_alloc_fp_mem_cnic(bp);
2126 if (rc) {
2127 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2128 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2129 }
2130
2131 /* Update the number of queues with the cnic queues */
2132 rc = bnx2x_set_real_num_queues(bp, 1);
2133 if (rc) {
2134 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2135 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2136 }
2137
2138 /* Add all CNIC NAPI objects */
2139 bnx2x_add_all_napi_cnic(bp);
2140 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2141 bnx2x_napi_enable_cnic(bp);
2142
2143 rc = bnx2x_init_hw_func_cnic(bp);
2144 if (rc)
2145 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2146
2147 bnx2x_nic_init_cnic(bp);
2148
2149 /* Enable Timer scan */
2150 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2151
2152 for_each_cnic_queue(bp, i) {
2153 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2154 if (rc) {
2155 BNX2X_ERR("Queue setup failed\n");
2156 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2157 }
2158 }
2159
2160 /* Initialize Rx filter. */
2161 netif_addr_lock_bh(bp->dev);
2162 bnx2x_set_rx_mode(bp->dev);
2163 netif_addr_unlock_bh(bp->dev);
2164
2165 /* re-read iscsi info */
2166 bnx2x_get_iscsi_info(bp);
2167 bnx2x_setup_cnic_irq_info(bp);
2168 bnx2x_setup_cnic_info(bp);
2169 bp->cnic_loaded = true;
2170 if (bp->state == BNX2X_STATE_OPEN)
2171 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2172
2173
2174 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2175
2176 return 0;
2177
2178#ifndef BNX2X_STOP_ON_ERROR
2179load_error_cnic2:
2180 /* Disable Timer scan */
2181 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2182
2183load_error_cnic1:
2184 bnx2x_napi_disable_cnic(bp);
2185 /* Update the number of queues without the cnic queues */
2186 rc = bnx2x_set_real_num_queues(bp, 0);
2187 if (rc)
2188 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2189load_error_cnic0:
2190 BNX2X_ERR("CNIC-related load failed\n");
2191 bnx2x_free_fp_mem_cnic(bp);
2192 bnx2x_free_mem_cnic(bp);
2193 return rc;
2194#endif /* ! BNX2X_STOP_ON_ERROR */
1988} 2195}
1989 2196
1990 2197
@@ -1995,6 +2202,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1995 u32 load_code; 2202 u32 load_code;
1996 int i, rc; 2203 int i, rc;
1997 2204
2205 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2206 DP(NETIF_MSG_IFUP,
2207 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2208
1998#ifdef BNX2X_STOP_ON_ERROR 2209#ifdef BNX2X_STOP_ON_ERROR
1999 if (unlikely(bp->panic)) { 2210 if (unlikely(bp->panic)) {
2000 BNX2X_ERR("Can't load NIC when there is panic\n"); 2211 BNX2X_ERR("Can't load NIC when there is panic\n");
@@ -2022,9 +2233,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2022 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); 2233 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2023 for_each_queue(bp, i) 2234 for_each_queue(bp, i)
2024 bnx2x_bz_fp(bp, i); 2235 bnx2x_bz_fp(bp, i);
2025 memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size * 2236 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2026 sizeof(struct bnx2x_fp_txdata)); 2237 bp->num_cnic_queues) *
2238 sizeof(struct bnx2x_fp_txdata));
2027 2239
2240 bp->fcoe_init = false;
2028 2241
2029 /* Set the receive queues buffer size */ 2242 /* Set the receive queues buffer size */
2030 bnx2x_set_rx_buf_size(bp); 2243 bnx2x_set_rx_buf_size(bp);
@@ -2034,9 +2247,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2034 2247
2035 /* As long as bnx2x_alloc_mem() may possibly update 2248 /* As long as bnx2x_alloc_mem() may possibly update
2036 * bp->num_queues, bnx2x_set_real_num_queues() should always 2249 * bp->num_queues, bnx2x_set_real_num_queues() should always
2037 * come after it. 2250 * come after it. At this stage cnic queues are not counted.
2038 */ 2251 */
2039 rc = bnx2x_set_real_num_queues(bp); 2252 rc = bnx2x_set_real_num_queues(bp, 0);
2040 if (rc) { 2253 if (rc) {
2041 BNX2X_ERR("Unable to set real_num_queues\n"); 2254 BNX2X_ERR("Unable to set real_num_queues\n");
2042 LOAD_ERROR_EXIT(bp, load_error0); 2255 LOAD_ERROR_EXIT(bp, load_error0);
@@ -2050,6 +2263,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2050 2263
2051 /* Add all NAPI objects */ 2264 /* Add all NAPI objects */
2052 bnx2x_add_all_napi(bp); 2265 bnx2x_add_all_napi(bp);
2266 DP(NETIF_MSG_IFUP, "napi added\n");
2053 bnx2x_napi_enable(bp); 2267 bnx2x_napi_enable(bp);
2054 2268
2055 /* set pf load just before approaching the MCP */ 2269 /* set pf load just before approaching the MCP */
@@ -2073,7 +2287,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2073 DRV_PULSE_SEQ_MASK); 2287 DRV_PULSE_SEQ_MASK);
2074 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); 2288 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2075 2289
2076 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); 2290 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2291 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2077 if (!load_code) { 2292 if (!load_code) {
2078 BNX2X_ERR("MCP response failure, aborting\n"); 2293 BNX2X_ERR("MCP response failure, aborting\n");
2079 rc = -EBUSY; 2294 rc = -EBUSY;
@@ -2191,23 +2406,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2191 LOAD_ERROR_EXIT(bp, load_error3); 2406 LOAD_ERROR_EXIT(bp, load_error3);
2192 } 2407 }
2193 2408
2194#ifdef BCM_CNIC 2409 for_each_nondefault_eth_queue(bp, i) {
2195 /* Enable Timer scan */
2196 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2197#endif
2198
2199 for_each_nondefault_queue(bp, i) {
2200 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); 2410 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2201 if (rc) { 2411 if (rc) {
2202 BNX2X_ERR("Queue setup failed\n"); 2412 BNX2X_ERR("Queue setup failed\n");
2203 LOAD_ERROR_EXIT(bp, load_error4); 2413 LOAD_ERROR_EXIT(bp, load_error3);
2204 } 2414 }
2205 } 2415 }
2206 2416
2207 rc = bnx2x_init_rss_pf(bp); 2417 rc = bnx2x_init_rss_pf(bp);
2208 if (rc) { 2418 if (rc) {
2209 BNX2X_ERR("PF RSS init failed\n"); 2419 BNX2X_ERR("PF RSS init failed\n");
2210 LOAD_ERROR_EXIT(bp, load_error4); 2420 LOAD_ERROR_EXIT(bp, load_error3);
2211 } 2421 }
2212 2422
2213 /* Now when Clients are configured we are ready to work */ 2423 /* Now when Clients are configured we are ready to work */
@@ -2217,7 +2427,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2217 rc = bnx2x_set_eth_mac(bp, true); 2427 rc = bnx2x_set_eth_mac(bp, true);
2218 if (rc) { 2428 if (rc) {
2219 BNX2X_ERR("Setting Ethernet MAC failed\n"); 2429 BNX2X_ERR("Setting Ethernet MAC failed\n");
2220 LOAD_ERROR_EXIT(bp, load_error4); 2430 LOAD_ERROR_EXIT(bp, load_error3);
2221 } 2431 }
2222 2432
2223 if (bp->pending_max) { 2433 if (bp->pending_max) {
@@ -2227,6 +2437,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2227 2437
2228 if (bp->port.pmf) 2438 if (bp->port.pmf)
2229 bnx2x_initial_phy_init(bp, load_mode); 2439 bnx2x_initial_phy_init(bp, load_mode);
2440 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2230 2441
2231 /* Start fast path */ 2442 /* Start fast path */
2232 2443
@@ -2257,21 +2468,15 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2257 } 2468 }
2258 2469
2259 if (bp->port.pmf) 2470 if (bp->port.pmf)
2260 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0); 2471 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2261 else 2472 else
2262 bnx2x__link_status_update(bp); 2473 bnx2x__link_status_update(bp);
2263 2474
2264 /* start the timer */ 2475 /* start the timer */
2265 mod_timer(&bp->timer, jiffies + bp->current_interval); 2476 mod_timer(&bp->timer, jiffies + bp->current_interval);
2266 2477
2267#ifdef BCM_CNIC 2478 if (CNIC_ENABLED(bp))
2268 /* re-read iscsi info */ 2479 bnx2x_load_cnic(bp);
2269 bnx2x_get_iscsi_info(bp);
2270 bnx2x_setup_cnic_irq_info(bp);
2271 bnx2x_setup_cnic_info(bp);
2272 if (bp->state == BNX2X_STATE_OPEN)
2273 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2274#endif
2275 2480
2276 /* mark driver is loaded in shmem2 */ 2481 /* mark driver is loaded in shmem2 */
2277 if (SHMEM2_HAS(bp, drv_capabilities_flag)) { 2482 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
@@ -2293,14 +2498,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2293 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) 2498 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2294 bnx2x_dcbx_init(bp, false); 2499 bnx2x_dcbx_init(bp, false);
2295 2500
2501 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2502
2296 return 0; 2503 return 0;
2297 2504
2298#ifndef BNX2X_STOP_ON_ERROR 2505#ifndef BNX2X_STOP_ON_ERROR
2299load_error4:
2300#ifdef BCM_CNIC
2301 /* Disable Timer scan */
2302 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2303#endif
2304load_error3: 2506load_error3:
2305 bnx2x_int_disable_sync(bp, 1); 2507 bnx2x_int_disable_sync(bp, 1);
2306 2508
@@ -2338,6 +2540,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2338 int i; 2540 int i;
2339 bool global = false; 2541 bool global = false;
2340 2542
2543 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2544
2341 /* mark driver is unloaded in shmem2 */ 2545 /* mark driver is unloaded in shmem2 */
2342 if (SHMEM2_HAS(bp, drv_capabilities_flag)) { 2546 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2343 u32 val; 2547 u32 val;
@@ -2373,14 +2577,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2373 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 2577 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2374 smp_mb(); 2578 smp_mb();
2375 2579
2580 if (CNIC_LOADED(bp))
2581 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2582
2376 /* Stop Tx */ 2583 /* Stop Tx */
2377 bnx2x_tx_disable(bp); 2584 bnx2x_tx_disable(bp);
2378 netdev_reset_tc(bp->dev); 2585 netdev_reset_tc(bp->dev);
2379 2586
2380#ifdef BCM_CNIC
2381 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2382#endif
2383
2384 bp->rx_mode = BNX2X_RX_MODE_NONE; 2587 bp->rx_mode = BNX2X_RX_MODE_NONE;
2385 2588
2386 del_timer_sync(&bp->timer); 2589 del_timer_sync(&bp->timer);
@@ -2414,7 +2617,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2414 bnx2x_netif_stop(bp, 1); 2617 bnx2x_netif_stop(bp, 1);
2415 /* Delete all NAPI objects */ 2618 /* Delete all NAPI objects */
2416 bnx2x_del_all_napi(bp); 2619 bnx2x_del_all_napi(bp);
2417 2620 if (CNIC_LOADED(bp))
2621 bnx2x_del_all_napi_cnic(bp);
2418 /* Release IRQs */ 2622 /* Release IRQs */
2419 bnx2x_free_irq(bp); 2623 bnx2x_free_irq(bp);
2420 2624
@@ -2435,12 +2639,19 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2435 2639
2436 /* Free SKBs, SGEs, TPA pool and driver internals */ 2640 /* Free SKBs, SGEs, TPA pool and driver internals */
2437 bnx2x_free_skbs(bp); 2641 bnx2x_free_skbs(bp);
2642 if (CNIC_LOADED(bp))
2643 bnx2x_free_skbs_cnic(bp);
2438 for_each_rx_queue(bp, i) 2644 for_each_rx_queue(bp, i)
2439 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 2645 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2440 2646
2647 if (CNIC_LOADED(bp)) {
2648 bnx2x_free_fp_mem_cnic(bp);
2649 bnx2x_free_mem_cnic(bp);
2650 }
2441 bnx2x_free_mem(bp); 2651 bnx2x_free_mem(bp);
2442 2652
2443 bp->state = BNX2X_STATE_CLOSED; 2653 bp->state = BNX2X_STATE_CLOSED;
2654 bp->cnic_loaded = false;
2444 2655
2445 /* Check if there are pending parity attentions. If there are - set 2656 /* Check if there are pending parity attentions. If there are - set
2446 * RECOVERY_IN_PROGRESS. 2657 * RECOVERY_IN_PROGRESS.
@@ -2460,6 +2671,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2460 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp))) 2671 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2461 bnx2x_disable_close_the_gate(bp); 2672 bnx2x_disable_close_the_gate(bp);
2462 2673
2674 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2675
2463 return 0; 2676 return 0;
2464} 2677}
2465 2678
@@ -2550,7 +2763,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
2550 2763
2551 /* Fall out from the NAPI loop if needed */ 2764 /* Fall out from the NAPI loop if needed */
2552 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 2765 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2553#ifdef BCM_CNIC 2766
2554 /* No need to update SB for FCoE L2 ring as long as 2767 /* No need to update SB for FCoE L2 ring as long as
2555 * it's connected to the default SB and the SB 2768 * it's connected to the default SB and the SB
2556 * has been updated when NAPI was scheduled. 2769 * has been updated when NAPI was scheduled.
@@ -2559,8 +2772,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
2559 napi_complete(napi); 2772 napi_complete(napi);
2560 break; 2773 break;
2561 } 2774 }
2562#endif
2563
2564 bnx2x_update_fpsb_idx(fp); 2775 bnx2x_update_fpsb_idx(fp);
2565 /* bnx2x_has_rx_work() reads the status block, 2776 /* bnx2x_has_rx_work() reads the status block,
2566 * thus we need to ensure that status block indices 2777 * thus we need to ensure that status block indices
@@ -2940,7 +3151,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2940 txq_index = skb_get_queue_mapping(skb); 3151 txq_index = skb_get_queue_mapping(skb);
2941 txq = netdev_get_tx_queue(dev, txq_index); 3152 txq = netdev_get_tx_queue(dev, txq_index);
2942 3153
2943 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); 3154 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
2944 3155
2945 txdata = &bp->bnx2x_txq[txq_index]; 3156 txdata = &bp->bnx2x_txq[txq_index];
2946 3157
@@ -2958,11 +3169,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2958 BDS_PER_TX_PKT + 3169 BDS_PER_TX_PKT +
2959 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) { 3170 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2960 /* Handle special storage cases separately */ 3171 /* Handle special storage cases separately */
2961 if (txdata->tx_ring_size != 0) { 3172 if (txdata->tx_ring_size == 0) {
2962 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 3173 struct bnx2x_eth_q_stats *q_stats =
3174 bnx2x_fp_qstats(bp, txdata->parent_fp);
3175 q_stats->driver_filtered_tx_pkt++;
3176 dev_kfree_skb(skb);
3177 return NETDEV_TX_OK;
3178 }
2963 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; 3179 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
2964 netif_tx_stop_queue(txq); 3180 netif_tx_stop_queue(txq);
2965 } 3181 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2966 3182
2967 return NETDEV_TX_BUSY; 3183 return NETDEV_TX_BUSY;
2968 } 3184 }
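In outline, the reordered full-ring check above now separates two cases:

    /* tx_ring_size == 0 marks an eth queue on a storage-only (iSCSI/FCoE
     * SD) function: the frame is counted in driver_filtered_tx_pkt and
     * consumed with NETDEV_TX_OK instead of stopping the queue.  A
     * genuinely full ring still bumps driver_xoff, stops the queue, logs
     * the BUG message and returns NETDEV_TX_BUSY.
     */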
@@ -3339,13 +3555,11 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3339 return -EINVAL; 3555 return -EINVAL;
3340 } 3556 }
3341 3557
3342#ifdef BCM_CNIC
3343 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) && 3558 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3344 !is_zero_ether_addr(addr->sa_data)) { 3559 !is_zero_ether_addr(addr->sa_data)) {
3345 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n"); 3560 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3346 return -EINVAL; 3561 return -EINVAL;
3347 } 3562 }
3348#endif
3349 3563
3350 if (netif_running(dev)) { 3564 if (netif_running(dev)) {
3351 rc = bnx2x_set_eth_mac(bp, false); 3565 rc = bnx2x_set_eth_mac(bp, false);
@@ -3369,13 +3583,11 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3369 u8 cos; 3583 u8 cos;
3370 3584
3371 /* Common */ 3585 /* Common */
3372#ifdef BCM_CNIC 3586
3373 if (IS_FCOE_IDX(fp_index)) { 3587 if (IS_FCOE_IDX(fp_index)) {
3374 memset(sb, 0, sizeof(union host_hc_status_block)); 3588 memset(sb, 0, sizeof(union host_hc_status_block));
3375 fp->status_blk_mapping = 0; 3589 fp->status_blk_mapping = 0;
3376
3377 } else { 3590 } else {
3378#endif
3379 /* status blocks */ 3591 /* status blocks */
3380 if (!CHIP_IS_E1x(bp)) 3592 if (!CHIP_IS_E1x(bp))
3381 BNX2X_PCI_FREE(sb->e2_sb, 3593 BNX2X_PCI_FREE(sb->e2_sb,
@@ -3387,9 +3599,8 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3387 bnx2x_fp(bp, fp_index, 3599 bnx2x_fp(bp, fp_index,
3388 status_blk_mapping), 3600 status_blk_mapping),
3389 sizeof(struct host_hc_status_block_e1x)); 3601 sizeof(struct host_hc_status_block_e1x));
3390#ifdef BCM_CNIC
3391 } 3602 }
3392#endif 3603
3393 /* Rx */ 3604 /* Rx */
3394 if (!skip_rx_queue(bp, fp_index)) { 3605 if (!skip_rx_queue(bp, fp_index)) {
3395 bnx2x_free_rx_bds(fp); 3606 bnx2x_free_rx_bds(fp);
@@ -3431,10 +3642,17 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3431 /* end of fastpath */ 3642 /* end of fastpath */
3432} 3643}
3433 3644
3645void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3646{
3647 int i;
3648 for_each_cnic_queue(bp, i)
3649 bnx2x_free_fp_mem_at(bp, i);
3650}
3651
3434void bnx2x_free_fp_mem(struct bnx2x *bp) 3652void bnx2x_free_fp_mem(struct bnx2x *bp)
3435{ 3653{
3436 int i; 3654 int i;
3437 for_each_queue(bp, i) 3655 for_each_eth_queue(bp, i)
3438 bnx2x_free_fp_mem_at(bp, i); 3656 bnx2x_free_fp_mem_at(bp, i);
3439} 3657}
3440 3658
@@ -3519,14 +3737,11 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3519 u8 cos; 3737 u8 cos;
3520 int rx_ring_size = 0; 3738 int rx_ring_size = 0;
3521 3739
3522#ifdef BCM_CNIC
3523 if (!bp->rx_ring_size && 3740 if (!bp->rx_ring_size &&
3524 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { 3741 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3525 rx_ring_size = MIN_RX_SIZE_NONTPA; 3742 rx_ring_size = MIN_RX_SIZE_NONTPA;
3526 bp->rx_ring_size = rx_ring_size; 3743 bp->rx_ring_size = rx_ring_size;
3527 } else 3744 } else if (!bp->rx_ring_size) {
3528#endif
3529 if (!bp->rx_ring_size) {
3530 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); 3745 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3531 3746
3532 if (CHIP_IS_E3(bp)) { 3747 if (CHIP_IS_E3(bp)) {
@@ -3550,9 +3765,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3550 3765
3551 /* Common */ 3766 /* Common */
3552 sb = &bnx2x_fp(bp, index, status_blk); 3767 sb = &bnx2x_fp(bp, index, status_blk);
3553#ifdef BCM_CNIC 3768
3554 if (!IS_FCOE_IDX(index)) { 3769 if (!IS_FCOE_IDX(index)) {
3555#endif
3556 /* status blocks */ 3770 /* status blocks */
3557 if (!CHIP_IS_E1x(bp)) 3771 if (!CHIP_IS_E1x(bp))
3558 BNX2X_PCI_ALLOC(sb->e2_sb, 3772 BNX2X_PCI_ALLOC(sb->e2_sb,
@@ -3562,9 +3776,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3562 BNX2X_PCI_ALLOC(sb->e1x_sb, 3776 BNX2X_PCI_ALLOC(sb->e1x_sb,
3563 &bnx2x_fp(bp, index, status_blk_mapping), 3777 &bnx2x_fp(bp, index, status_blk_mapping),
3564 sizeof(struct host_hc_status_block_e1x)); 3778 sizeof(struct host_hc_status_block_e1x));
3565#ifdef BCM_CNIC
3566 } 3779 }
3567#endif
3568 3780
3569 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to 3781 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3570 * set shortcuts for it. 3782 * set shortcuts for it.
@@ -3641,31 +3853,31 @@ alloc_mem_err:
3641 return 0; 3853 return 0;
3642} 3854}
3643 3855
3856int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
3857{
3858 if (!NO_FCOE(bp))
3859 /* FCoE */
3860 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3861 /* we will fail load process instead of mark
3862 * NO_FCOE_FLAG
3863 */
3864 return -ENOMEM;
3865
3866 return 0;
3867}
3868
3644int bnx2x_alloc_fp_mem(struct bnx2x *bp) 3869int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3645{ 3870{
3646 int i; 3871 int i;
3647 3872
3648 /** 3873 /* 1. Allocate FP for leading - fatal if error
3649 * 1. Allocate FP for leading - fatal if error 3874 * 2. Allocate RSS - fix number of queues if error
3650 * 2. {CNIC} Allocate FCoE FP - fatal if error
3651 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3652 * 4. Allocate RSS - fix number of queues if error
3653 */ 3875 */
3654 3876
3655 /* leading */ 3877 /* leading */
3656 if (bnx2x_alloc_fp_mem_at(bp, 0)) 3878 if (bnx2x_alloc_fp_mem_at(bp, 0))
3657 return -ENOMEM; 3879 return -ENOMEM;
3658 3880
3659#ifdef BCM_CNIC
3660 if (!NO_FCOE(bp))
3661 /* FCoE */
3662 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3663 /* we will fail load process instead of mark
3664 * NO_FCOE_FLAG
3665 */
3666 return -ENOMEM;
3667#endif
3668
3669 /* RSS */ 3881 /* RSS */
3670 for_each_nondefault_eth_queue(bp, i) 3882 for_each_nondefault_eth_queue(bp, i)
3671 if (bnx2x_alloc_fp_mem_at(bp, i)) 3883 if (bnx2x_alloc_fp_mem_at(bp, i))
@@ -3676,17 +3888,18 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3676 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; 3888 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3677 3889
3678 WARN_ON(delta < 0); 3890 WARN_ON(delta < 0);
3679#ifdef BCM_CNIC 3891 bnx2x_shrink_eth_fp(bp, delta);
3680 /** 3892 if (CNIC_SUPPORT(bp))
3681 * move non eth FPs next to last eth FP 3893 /* move non eth FPs next to last eth FP
3682 * must be done in that order 3894 * must be done in that order
3683 * FCOE_IDX < FWD_IDX < OOO_IDX 3895 * FCOE_IDX < FWD_IDX < OOO_IDX
3684 */ 3896 */
3685 3897
3686 /* move FCoE fp even NO_FCOE_FLAG is on */ 3898 /* move FCoE fp even NO_FCOE_FLAG is on */
3687 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); 3899 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
3688#endif 3900 bp->num_ethernet_queues -= delta;
3689 bp->num_queues -= delta; 3901 bp->num_queues = bp->num_ethernet_queues +
3902 bp->num_cnic_queues;
3690 BNX2X_ERR("Adjusted num of queues from %d to %d\n", 3903 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3691 bp->num_queues + delta, bp->num_queues); 3904 bp->num_queues + delta, bp->num_queues);
3692 } 3905 }
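A worked allocation shortfall tying bnx2x_shrink_eth_fp() and bnx2x_move_fp() together (hypothetical counts):

    /* 8 eth queues requested, allocation fails at i = 6 -> delta = 2.
     * bnx2x_shrink_eth_fp() compacts the txdata slots of the 6 surviving
     * queues, then bnx2x_move_fp() slides the FCoE fp from index 8 to
     * index 6, after which:
     *   num_ethernet_queues = 6, num_queues = 6 + num_cnic_queues.
     */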
@@ -3705,13 +3918,13 @@ void bnx2x_free_mem_bp(struct bnx2x *bp)
3705 kfree(bp->ilt); 3918 kfree(bp->ilt);
3706} 3919}
3707 3920
3708int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) 3921int bnx2x_alloc_mem_bp(struct bnx2x *bp)
3709{ 3922{
3710 struct bnx2x_fastpath *fp; 3923 struct bnx2x_fastpath *fp;
3711 struct msix_entry *tbl; 3924 struct msix_entry *tbl;
3712 struct bnx2x_ilt *ilt; 3925 struct bnx2x_ilt *ilt;
3713 int msix_table_size = 0; 3926 int msix_table_size = 0;
3714 int fp_array_size; 3927 int fp_array_size, txq_array_size;
3715 int i; 3928 int i;
3716 3929
3717 /* 3930 /*
@@ -3721,7 +3934,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3721 msix_table_size = bp->igu_sb_cnt + 1; 3934 msix_table_size = bp->igu_sb_cnt + 1;
3722 3935
3723 /* fp array: RSS plus CNIC related L2 queues */ 3936 /* fp array: RSS plus CNIC related L2 queues */
3724 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE; 3937 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
3725 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size); 3938 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3726 3939
3727 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL); 3940 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
@@ -3750,12 +3963,12 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3750 goto alloc_err; 3963 goto alloc_err;
3751 3964
3752 /* Allocate memory for the transmission queues array */ 3965 /* Allocate memory for the transmission queues array */
3753 bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS; 3966 txq_array_size =
3754#ifdef BCM_CNIC 3967 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
3755 bp->bnx2x_txq_size++; 3968 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
3756#endif 3969
3757 bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size, 3970 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
3758 sizeof(struct bnx2x_fp_txdata), GFP_KERNEL); 3971 GFP_KERNEL);
3759 if (!bp->bnx2x_txq) 3972 if (!bp->bnx2x_txq)
3760 goto alloc_err; 3973 goto alloc_err;
3761 3974
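Hypothetical sizing for the new txq array bound (the macro values are chip-dependent; the figures below are illustrative only):

    /* BNX2X_MAX_RSS_COUNT(bp) = 16, BNX2X_MULTI_TX_COS = 3, CNIC present:
     *   txq_array_size = 16 * 3 + 1 = 49 struct bnx2x_fp_txdata entries.
     * Sizing from the chip maximum rather than the runtime queue count
     * means later queue shrinks can never overrun the array.
     */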
@@ -3838,7 +4051,7 @@ int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3838 return LINK_CONFIG_IDX(sel_phy_idx); 4051 return LINK_CONFIG_IDX(sel_phy_idx);
3839} 4052}
3840 4053
3841#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 4054#ifdef NETDEV_FCOE_WWNN
3842int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) 4055int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3843{ 4056{
3844 struct bnx2x *bp = netdev_priv(dev); 4057 struct bnx2x *bp = netdev_priv(dev);