aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorMerav Sicron <meravs@broadcom.com>2012-11-06 19:45:48 -0500
committerDavid S. Miller <davem@davemloft.net>2012-11-07 18:57:19 -0500
commit55c11941e382cb26010138ab824216f47af37606 (patch)
tree92724ef130081b47426919758c5fac4061e9e708 /drivers
parentbabc6727d537199f7fbf6dfe711ae418d399b3eb (diff)
bnx2x: Support loading cnic resources at run-time
This patch replaces the BCM_CNIC define with a flag which can change at run-time and which does not use the CONFIG_CNIC kconfig option. For the PF/hypervisor driver cnic is always supported, however allocation of cnic resources and configuration of the HW for offload mode is done only when the cnic module registers bnx2x. Signed-off-by: Merav Sicron <meravs@broadcom.com> Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com> Signed-off-by: Eilon Greenstein <eilong@broadcom.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h132
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c460
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h87
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h29
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c837
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c59
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h12
12 files changed, 1084 insertions, 565 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 72897c47b8c8..de121ccd675e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -34,18 +34,10 @@
34 34
35#include "bnx2x_hsi.h" 35#include "bnx2x_hsi.h"
36 36
37#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
38#define BCM_CNIC 1
39#include "../cnic_if.h" 37#include "../cnic_if.h"
40#endif
41 38
42#ifdef BCM_CNIC 39
43#define BNX2X_MIN_MSIX_VEC_CNT 3 40#define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt)
44#define BNX2X_MSIX_VEC_FP_START 2
45#else
46#define BNX2X_MIN_MSIX_VEC_CNT 2
47#define BNX2X_MSIX_VEC_FP_START 1
48#endif
49 41
50#include <linux/mdio.h> 42#include <linux/mdio.h>
51 43
@@ -256,15 +248,10 @@ enum {
256 /* FCoE L2 */ 248 /* FCoE L2 */
257#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1) 249#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1)
258 250
259/** Additional rings budgeting */ 251#define CNIC_SUPPORT(bp) ((bp)->cnic_support)
260#ifdef BCM_CNIC 252#define CNIC_ENABLED(bp) ((bp)->cnic_enabled)
261#define CNIC_PRESENT 1 253#define CNIC_LOADED(bp) ((bp)->cnic_loaded)
262#define FCOE_PRESENT 1 254#define FCOE_INIT(bp) ((bp)->fcoe_init)
263#else
264#define CNIC_PRESENT 0
265#define FCOE_PRESENT 0
266#endif /* BCM_CNIC */
267#define NON_ETH_CONTEXT_USE (FCOE_PRESENT)
268 255
269#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ 256#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
270 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR 257 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@ -297,9 +284,7 @@ enum {
297 OOO_TXQ_IDX_OFFSET, 284 OOO_TXQ_IDX_OFFSET,
298}; 285};
299#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos) 286#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
300#ifdef BCM_CNIC
301#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET) 287#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
302#endif
303 288
304/* fast path */ 289/* fast path */
305/* 290/*
@@ -585,15 +570,9 @@ struct bnx2x_fastpath {
585 ->var) 570 ->var)
586 571
587 572
588#define IS_ETH_FP(fp) (fp->index < \ 573#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
589 BNX2X_NUM_ETH_QUEUES(fp->bp)) 574#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp))
590#ifdef BCM_CNIC 575#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
591#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX(fp->bp))
592#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
593#else
594#define IS_FCOE_FP(fp) false
595#define IS_FCOE_IDX(idx) false
596#endif
597 576
598 577
599/* MC hsi */ 578/* MC hsi */
@@ -886,6 +865,18 @@ struct bnx2x_common {
886 (CHIP_REV(bp) == CHIP_REV_Bx)) 865 (CHIP_REV(bp) == CHIP_REV_Bx))
887#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ 866#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
888 (CHIP_REV(bp) == CHIP_REV_Ax)) 867 (CHIP_REV(bp) == CHIP_REV_Ax))
868/* This define is used in two main places:
869 * 1. In the early stages of nic_load, to know if to configrue Parser / Searcher
870 * to nic-only mode or to offload mode. Offload mode is configured if either the
871 * chip is E1x (where MIC_MODE register is not applicable), or if cnic already
872 * registered for this port (which means that the user wants storage services).
873 * 2. During cnic-related load, to know if offload mode is already configured in
874 * the HW or needs to be configrued.
875 * Since the transition from nic-mode to offload-mode in HW causes traffic
876 * coruption, nic-mode is configured only in ports on which storage services
877 * where never requested.
878 */
879#define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
889 880
890 int flash_size; 881 int flash_size;
891#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ 882#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
@@ -1003,18 +994,15 @@ union cdu_context {
1003#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */ 994#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
1004#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) 995#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
1005 996
1006#ifdef BCM_CNIC
1007#define CNIC_ISCSI_CID_MAX 256 997#define CNIC_ISCSI_CID_MAX 256
1008#define CNIC_FCOE_CID_MAX 2048 998#define CNIC_FCOE_CID_MAX 2048
1009#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX) 999#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
1010#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) 1000#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
1011#endif
1012 1001
1013#define QM_ILT_PAGE_SZ_HW 0 1002#define QM_ILT_PAGE_SZ_HW 0
1014#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ 1003#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
1015#define QM_CID_ROUND 1024 1004#define QM_CID_ROUND 1024
1016 1005
1017#ifdef BCM_CNIC
1018/* TM (timers) host DB constants */ 1006/* TM (timers) host DB constants */
1019#define TM_ILT_PAGE_SZ_HW 0 1007#define TM_ILT_PAGE_SZ_HW 0
1020#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ 1008#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
@@ -1032,8 +1020,6 @@ union cdu_context {
1032#define SRC_T2_SZ SRC_ILT_SZ 1020#define SRC_T2_SZ SRC_ILT_SZ
1033#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) 1021#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
1034 1022
1035#endif
1036
1037#define MAX_DMAE_C 8 1023#define MAX_DMAE_C 8
1038 1024
1039/* DMA memory not used in fastpath */ 1025/* DMA memory not used in fastpath */
@@ -1227,7 +1213,6 @@ struct bnx2x {
1227 struct bnx2x_sp_objs *sp_objs; 1213 struct bnx2x_sp_objs *sp_objs;
1228 struct bnx2x_fp_stats *fp_stats; 1214 struct bnx2x_fp_stats *fp_stats;
1229 struct bnx2x_fp_txdata *bnx2x_txq; 1215 struct bnx2x_fp_txdata *bnx2x_txq;
1230 int bnx2x_txq_size;
1231 void __iomem *regview; 1216 void __iomem *regview;
1232 void __iomem *doorbells; 1217 void __iomem *doorbells;
1233 u16 db_size; 1218 u16 db_size;
@@ -1350,6 +1335,15 @@ struct bnx2x {
1350#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) 1335#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
1351#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) 1336#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
1352 1337
1338 u8 cnic_support;
1339 bool cnic_enabled;
1340 bool cnic_loaded;
1341
1342 /* Flag that indicates that we can start looking for FCoE L2 queue
1343 * completions in the default status block.
1344 */
1345 bool fcoe_init;
1346
1353 int pm_cap; 1347 int pm_cap;
1354 int mrrs; 1348 int mrrs;
1355 1349
@@ -1420,6 +1414,8 @@ struct bnx2x {
1420#define BNX2X_MAX_COS 3 1414#define BNX2X_MAX_COS 3
1421#define BNX2X_MAX_TX_COS 2 1415#define BNX2X_MAX_TX_COS 2
1422 int num_queues; 1416 int num_queues;
1417 uint num_ethernet_queues;
1418 uint num_cnic_queues;
1423 int num_napi_queues; 1419 int num_napi_queues;
1424 int disable_tpa; 1420 int disable_tpa;
1425 1421
@@ -1433,6 +1429,7 @@ struct bnx2x {
1433 u8 igu_dsb_id; 1429 u8 igu_dsb_id;
1434 u8 igu_base_sb; 1430 u8 igu_base_sb;
1435 u8 igu_sb_cnt; 1431 u8 igu_sb_cnt;
1432 u8 min_msix_vec_cnt;
1436 1433
1437 dma_addr_t def_status_blk_mapping; 1434 dma_addr_t def_status_blk_mapping;
1438 1435
@@ -1478,16 +1475,16 @@ struct bnx2x {
1478 * Maximum supported number of RSS queues: number of IGU SBs minus one that goes 1475 * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
1479 * to CNIC. 1476 * to CNIC.
1480 */ 1477 */
1481#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT) 1478#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_SUPPORT(bp))
1482 1479
1483/* 1480/*
1484 * Maximum CID count that might be required by the bnx2x: 1481 * Maximum CID count that might be required by the bnx2x:
1485 * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI 1482 * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
1486 */ 1483 */
1487#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \ 1484#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
1488 + NON_ETH_CONTEXT_USE + CNIC_PRESENT) 1485 + 2 * CNIC_SUPPORT(bp))
1489#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \ 1486#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
1490 + NON_ETH_CONTEXT_USE + CNIC_PRESENT) 1487 + 2 * CNIC_SUPPORT(bp))
1491#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ 1488#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
1492 ILT_PAGE_CIDS)) 1489 ILT_PAGE_CIDS))
1493 1490
@@ -1495,9 +1492,6 @@ struct bnx2x {
1495 1492
1496 int dropless_fc; 1493 int dropless_fc;
1497 1494
1498#ifdef BCM_CNIC
1499 u32 cnic_flags;
1500#define BNX2X_CNIC_FLAG_MAC_SET 1
1501 void *t2; 1495 void *t2;
1502 dma_addr_t t2_mapping; 1496 dma_addr_t t2_mapping;
1503 struct cnic_ops __rcu *cnic_ops; 1497 struct cnic_ops __rcu *cnic_ops;
@@ -1518,7 +1512,6 @@ struct bnx2x {
1518 1512
1519 /* Start index of the "special" (CNIC related) L2 cleints */ 1513 /* Start index of the "special" (CNIC related) L2 cleints */
1520 u8 cnic_base_cl_id; 1514 u8 cnic_base_cl_id;
1521#endif
1522 1515
1523 int dmae_ready; 1516 int dmae_ready;
1524 /* used to synchronize dmae accesses */ 1517 /* used to synchronize dmae accesses */
@@ -1647,9 +1640,9 @@ struct bnx2x {
1647/* Tx queues may be less or equal to Rx queues */ 1640/* Tx queues may be less or equal to Rx queues */
1648extern int num_queues; 1641extern int num_queues;
1649#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) 1642#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1650#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE) 1643#define BNX2X_NUM_ETH_QUEUES(bp) ((bp)->num_ethernet_queues)
1651#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \ 1644#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \
1652 NON_ETH_CONTEXT_USE) 1645 (bp)->num_cnic_queues)
1653#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) 1646#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
1654 1647
1655#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) 1648#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
@@ -1689,6 +1682,13 @@ struct bnx2x_func_init_params {
1689 u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ 1682 u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
1690}; 1683};
1691 1684
1685#define for_each_cnic_queue(bp, var) \
1686 for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
1687 (var)++) \
1688 if (skip_queue(bp, var)) \
1689 continue; \
1690 else
1691
1692#define for_each_eth_queue(bp, var) \ 1692#define for_each_eth_queue(bp, var) \
1693 for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) 1693 for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
1694 1694
@@ -1702,6 +1702,22 @@ struct bnx2x_func_init_params {
1702 else 1702 else
1703 1703
1704/* Skip forwarding FP */ 1704/* Skip forwarding FP */
1705#define for_each_valid_rx_queue(bp, var) \
1706 for ((var) = 0; \
1707 (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
1708 BNX2X_NUM_ETH_QUEUES(bp)); \
1709 (var)++) \
1710 if (skip_rx_queue(bp, var)) \
1711 continue; \
1712 else
1713
1714#define for_each_rx_queue_cnic(bp, var) \
1715 for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
1716 (var)++) \
1717 if (skip_rx_queue(bp, var)) \
1718 continue; \
1719 else
1720
1705#define for_each_rx_queue(bp, var) \ 1721#define for_each_rx_queue(bp, var) \
1706 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ 1722 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
1707 if (skip_rx_queue(bp, var)) \ 1723 if (skip_rx_queue(bp, var)) \
@@ -1709,6 +1725,22 @@ struct bnx2x_func_init_params {
1709 else 1725 else
1710 1726
1711/* Skip OOO FP */ 1727/* Skip OOO FP */
1728#define for_each_valid_tx_queue(bp, var) \
1729 for ((var) = 0; \
1730 (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
1731 BNX2X_NUM_ETH_QUEUES(bp)); \
1732 (var)++) \
1733 if (skip_tx_queue(bp, var)) \
1734 continue; \
1735 else
1736
1737#define for_each_tx_queue_cnic(bp, var) \
1738 for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
1739 (var)++) \
1740 if (skip_tx_queue(bp, var)) \
1741 continue; \
1742 else
1743
1712#define for_each_tx_queue(bp, var) \ 1744#define for_each_tx_queue(bp, var) \
1713 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ 1745 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
1714 if (skip_tx_queue(bp, var)) \ 1746 if (skip_tx_queue(bp, var)) \
@@ -2179,7 +2211,6 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
2179#define BNX2X_MF_SD_PROTOCOL(bp) \ 2211#define BNX2X_MF_SD_PROTOCOL(bp) \
2180 ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK) 2212 ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
2181 2213
2182#ifdef BCM_CNIC
2183#define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \ 2214#define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \
2184 (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI) 2215 (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
2185 2216
@@ -2196,9 +2227,12 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
2196#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \ 2227#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
2197 (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ 2228 (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
2198 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) 2229 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
2199#else
2200#define IS_MF_FCOE_AFEX(bp) false
2201#endif
2202 2230
2231enum {
2232 SWITCH_UPDATE,
2233 AFEX_UPDATE,
2234};
2235
2236#define NUM_MACS 8
2203 2237
2204#endif /* bnx2x.h */ 2238#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4833b6a9031c..54d522da1aa7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1152,6 +1152,25 @@ static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1152 } 1152 }
1153} 1153}
1154 1154
1155void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1156{
1157 int j;
1158
1159 for_each_rx_queue_cnic(bp, j) {
1160 struct bnx2x_fastpath *fp = &bp->fp[j];
1161
1162 fp->rx_bd_cons = 0;
1163
1164 /* Activate BD ring */
1165 /* Warning!
1166 * this will generate an interrupt (to the TSTORM)
1167 * must only be done after chip is initialized
1168 */
1169 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1170 fp->rx_sge_prod);
1171 }
1172}
1173
1155void bnx2x_init_rx_rings(struct bnx2x *bp) 1174void bnx2x_init_rx_rings(struct bnx2x *bp)
1156{ 1175{
1157 int func = BP_FUNC(bp); 1176 int func = BP_FUNC(bp);
@@ -1159,7 +1178,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1159 int i, j; 1178 int i, j;
1160 1179
1161 /* Allocate TPA resources */ 1180 /* Allocate TPA resources */
1162 for_each_rx_queue(bp, j) { 1181 for_each_eth_queue(bp, j) {
1163 struct bnx2x_fastpath *fp = &bp->fp[j]; 1182 struct bnx2x_fastpath *fp = &bp->fp[j];
1164 1183
1165 DP(NETIF_MSG_IFUP, 1184 DP(NETIF_MSG_IFUP,
@@ -1217,7 +1236,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1217 } 1236 }
1218 } 1237 }
1219 1238
1220 for_each_rx_queue(bp, j) { 1239 for_each_eth_queue(bp, j) {
1221 struct bnx2x_fastpath *fp = &bp->fp[j]; 1240 struct bnx2x_fastpath *fp = &bp->fp[j];
1222 1241
1223 fp->rx_bd_cons = 0; 1242 fp->rx_bd_cons = 0;
@@ -1244,29 +1263,45 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1244 } 1263 }
1245} 1264}
1246 1265
1247static void bnx2x_free_tx_skbs(struct bnx2x *bp) 1266static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1248{ 1267{
1249 int i;
1250 u8 cos; 1268 u8 cos;
1269 struct bnx2x *bp = fp->bp;
1251 1270
1252 for_each_tx_queue(bp, i) { 1271 for_each_cos_in_tx_queue(fp, cos) {
1253 struct bnx2x_fastpath *fp = &bp->fp[i]; 1272 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1254 for_each_cos_in_tx_queue(fp, cos) { 1273 unsigned pkts_compl = 0, bytes_compl = 0;
1255 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1256 unsigned pkts_compl = 0, bytes_compl = 0;
1257 1274
1258 u16 sw_prod = txdata->tx_pkt_prod; 1275 u16 sw_prod = txdata->tx_pkt_prod;
1259 u16 sw_cons = txdata->tx_pkt_cons; 1276 u16 sw_cons = txdata->tx_pkt_cons;
1260 1277
1261 while (sw_cons != sw_prod) { 1278 while (sw_cons != sw_prod) {
1262 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), 1279 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1263 &pkts_compl, &bytes_compl); 1280 &pkts_compl, &bytes_compl);
1264 sw_cons++; 1281 sw_cons++;
1265 }
1266 netdev_tx_reset_queue(
1267 netdev_get_tx_queue(bp->dev,
1268 txdata->txq_index));
1269 } 1282 }
1283
1284 netdev_tx_reset_queue(
1285 netdev_get_tx_queue(bp->dev,
1286 txdata->txq_index));
1287 }
1288}
1289
1290static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1291{
1292 int i;
1293
1294 for_each_tx_queue_cnic(bp, i) {
1295 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1296 }
1297}
1298
1299static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1300{
1301 int i;
1302
1303 for_each_eth_queue(bp, i) {
1304 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1270 } 1305 }
1271} 1306}
1272 1307
@@ -1294,11 +1329,20 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1294 } 1329 }
1295} 1330}
1296 1331
1332static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1333{
1334 int j;
1335
1336 for_each_rx_queue_cnic(bp, j) {
1337 bnx2x_free_rx_bds(&bp->fp[j]);
1338 }
1339}
1340
1297static void bnx2x_free_rx_skbs(struct bnx2x *bp) 1341static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1298{ 1342{
1299 int j; 1343 int j;
1300 1344
1301 for_each_rx_queue(bp, j) { 1345 for_each_eth_queue(bp, j) {
1302 struct bnx2x_fastpath *fp = &bp->fp[j]; 1346 struct bnx2x_fastpath *fp = &bp->fp[j];
1303 1347
1304 bnx2x_free_rx_bds(fp); 1348 bnx2x_free_rx_bds(fp);
@@ -1308,6 +1352,12 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1308 } 1352 }
1309} 1353}
1310 1354
1355void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1356{
1357 bnx2x_free_tx_skbs_cnic(bp);
1358 bnx2x_free_rx_skbs_cnic(bp);
1359}
1360
1311void bnx2x_free_skbs(struct bnx2x *bp) 1361void bnx2x_free_skbs(struct bnx2x *bp)
1312{ 1362{
1313 bnx2x_free_tx_skbs(bp); 1363 bnx2x_free_tx_skbs(bp);
@@ -1347,11 +1397,12 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1347 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", 1397 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1348 bp->msix_table[offset].vector); 1398 bp->msix_table[offset].vector);
1349 offset++; 1399 offset++;
1350#ifdef BCM_CNIC 1400
1351 if (nvecs == offset) 1401 if (CNIC_SUPPORT(bp)) {
1352 return; 1402 if (nvecs == offset)
1353 offset++; 1403 return;
1354#endif 1404 offset++;
1405 }
1355 1406
1356 for_each_eth_queue(bp, i) { 1407 for_each_eth_queue(bp, i) {
1357 if (nvecs == offset) 1408 if (nvecs == offset)
@@ -1368,7 +1419,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
1368 if (bp->flags & USING_MSIX_FLAG && 1419 if (bp->flags & USING_MSIX_FLAG &&
1369 !(bp->flags & USING_SINGLE_MSIX_FLAG)) 1420 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1370 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + 1421 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1371 CNIC_PRESENT + 1); 1422 CNIC_SUPPORT(bp) + 1);
1372 else 1423 else
1373 free_irq(bp->dev->irq, bp->dev); 1424 free_irq(bp->dev->irq, bp->dev);
1374} 1425}
@@ -1382,12 +1433,14 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1382 bp->msix_table[0].entry); 1433 bp->msix_table[0].entry);
1383 msix_vec++; 1434 msix_vec++;
1384 1435
1385#ifdef BCM_CNIC 1436 /* Cnic requires an msix vector for itself */
1386 bp->msix_table[msix_vec].entry = msix_vec; 1437 if (CNIC_SUPPORT(bp)) {
1387 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n", 1438 bp->msix_table[msix_vec].entry = msix_vec;
1388 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); 1439 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1389 msix_vec++; 1440 msix_vec, bp->msix_table[msix_vec].entry);
1390#endif 1441 msix_vec++;
1442 }
1443
1391 /* We need separate vectors for ETH queues only (not FCoE) */ 1444 /* We need separate vectors for ETH queues only (not FCoE) */
1392 for_each_eth_queue(bp, i) { 1445 for_each_eth_queue(bp, i) {
1393 bp->msix_table[msix_vec].entry = msix_vec; 1446 bp->msix_table[msix_vec].entry = msix_vec;
@@ -1396,7 +1449,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1396 msix_vec++; 1449 msix_vec++;
1397 } 1450 }
1398 1451
1399 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1; 1452 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
1400 1453
1401 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); 1454 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1402 1455
@@ -1404,7 +1457,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1404 * reconfigure number of tx/rx queues according to available 1457 * reconfigure number of tx/rx queues according to available
1405 * MSI-X vectors 1458 * MSI-X vectors
1406 */ 1459 */
1407 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { 1460 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1408 /* how less vectors we will have? */ 1461 /* how less vectors we will have? */
1409 int diff = req_cnt - rc; 1462 int diff = req_cnt - rc;
1410 1463
@@ -1419,7 +1472,8 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1419 /* 1472 /*
1420 * decrease number of queues by number of unallocated entries 1473 * decrease number of queues by number of unallocated entries
1421 */ 1474 */
1422 bp->num_queues -= diff; 1475 bp->num_ethernet_queues -= diff;
1476 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1423 1477
1424 BNX2X_DEV_INFO("New queue configuration set: %d\n", 1478 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1425 bp->num_queues); 1479 bp->num_queues);
@@ -1435,6 +1489,9 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1435 BNX2X_DEV_INFO("Using single MSI-X vector\n"); 1489 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1436 bp->flags |= USING_SINGLE_MSIX_FLAG; 1490 bp->flags |= USING_SINGLE_MSIX_FLAG;
1437 1491
1492 BNX2X_DEV_INFO("set number of queues to 1\n");
1493 bp->num_ethernet_queues = 1;
1494 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1438 } else if (rc < 0) { 1495 } else if (rc < 0) {
1439 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); 1496 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1440 goto no_msix; 1497 goto no_msix;
@@ -1464,9 +1521,9 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1464 return -EBUSY; 1521 return -EBUSY;
1465 } 1522 }
1466 1523
1467#ifdef BCM_CNIC 1524 if (CNIC_SUPPORT(bp))
1468 offset++; 1525 offset++;
1469#endif 1526
1470 for_each_eth_queue(bp, i) { 1527 for_each_eth_queue(bp, i) {
1471 struct bnx2x_fastpath *fp = &bp->fp[i]; 1528 struct bnx2x_fastpath *fp = &bp->fp[i];
1472 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", 1529 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -1485,7 +1542,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1485 } 1542 }
1486 1543
1487 i = BNX2X_NUM_ETH_QUEUES(bp); 1544 i = BNX2X_NUM_ETH_QUEUES(bp);
1488 offset = 1 + CNIC_PRESENT; 1545 offset = 1 + CNIC_SUPPORT(bp);
1489 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n", 1546 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1490 bp->msix_table[0].vector, 1547 bp->msix_table[0].vector,
1491 0, bp->msix_table[offset].vector, 1548 0, bp->msix_table[offset].vector,
@@ -1556,19 +1613,35 @@ static int bnx2x_setup_irqs(struct bnx2x *bp)
1556 return 0; 1613 return 0;
1557} 1614}
1558 1615
1616static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1617{
1618 int i;
1619
1620 for_each_rx_queue_cnic(bp, i)
1621 napi_enable(&bnx2x_fp(bp, i, napi));
1622}
1623
1559static void bnx2x_napi_enable(struct bnx2x *bp) 1624static void bnx2x_napi_enable(struct bnx2x *bp)
1560{ 1625{
1561 int i; 1626 int i;
1562 1627
1563 for_each_rx_queue(bp, i) 1628 for_each_eth_queue(bp, i)
1564 napi_enable(&bnx2x_fp(bp, i, napi)); 1629 napi_enable(&bnx2x_fp(bp, i, napi));
1565} 1630}
1566 1631
1632static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1633{
1634 int i;
1635
1636 for_each_rx_queue_cnic(bp, i)
1637 napi_disable(&bnx2x_fp(bp, i, napi));
1638}
1639
1567static void bnx2x_napi_disable(struct bnx2x *bp) 1640static void bnx2x_napi_disable(struct bnx2x *bp)
1568{ 1641{
1569 int i; 1642 int i;
1570 1643
1571 for_each_rx_queue(bp, i) 1644 for_each_eth_queue(bp, i)
1572 napi_disable(&bnx2x_fp(bp, i, napi)); 1645 napi_disable(&bnx2x_fp(bp, i, napi));
1573} 1646}
1574 1647
@@ -1576,6 +1649,8 @@ void bnx2x_netif_start(struct bnx2x *bp)
1576{ 1649{
1577 if (netif_running(bp->dev)) { 1650 if (netif_running(bp->dev)) {
1578 bnx2x_napi_enable(bp); 1651 bnx2x_napi_enable(bp);
1652 if (CNIC_LOADED(bp))
1653 bnx2x_napi_enable_cnic(bp);
1579 bnx2x_int_enable(bp); 1654 bnx2x_int_enable(bp);
1580 if (bp->state == BNX2X_STATE_OPEN) 1655 if (bp->state == BNX2X_STATE_OPEN)
1581 netif_tx_wake_all_queues(bp->dev); 1656 netif_tx_wake_all_queues(bp->dev);
@@ -1586,14 +1661,15 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1586{ 1661{
1587 bnx2x_int_disable_sync(bp, disable_hw); 1662 bnx2x_int_disable_sync(bp, disable_hw);
1588 bnx2x_napi_disable(bp); 1663 bnx2x_napi_disable(bp);
1664 if (CNIC_LOADED(bp))
1665 bnx2x_napi_disable_cnic(bp);
1589} 1666}
1590 1667
1591u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) 1668u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1592{ 1669{
1593 struct bnx2x *bp = netdev_priv(dev); 1670 struct bnx2x *bp = netdev_priv(dev);
1594 1671
1595#ifdef BCM_CNIC 1672 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1596 if (!NO_FCOE(bp)) {
1597 struct ethhdr *hdr = (struct ethhdr *)skb->data; 1673 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1598 u16 ether_type = ntohs(hdr->h_proto); 1674 u16 ether_type = ntohs(hdr->h_proto);
1599 1675
@@ -1609,7 +1685,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1609 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) 1685 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1610 return bnx2x_fcoe_tx(bp, txq_index); 1686 return bnx2x_fcoe_tx(bp, txq_index);
1611 } 1687 }
1612#endif 1688
1613 /* select a non-FCoE queue */ 1689 /* select a non-FCoE queue */
1614 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); 1690 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1615} 1691}
@@ -1618,15 +1694,15 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1618void bnx2x_set_num_queues(struct bnx2x *bp) 1694void bnx2x_set_num_queues(struct bnx2x *bp)
1619{ 1695{
1620 /* RSS queues */ 1696 /* RSS queues */
1621 bp->num_queues = bnx2x_calc_num_queues(bp); 1697 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1622 1698
1623#ifdef BCM_CNIC
1624 /* override in STORAGE SD modes */ 1699 /* override in STORAGE SD modes */
1625 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) 1700 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1626 bp->num_queues = 1; 1701 bp->num_ethernet_queues = 1;
1627#endif 1702
1628 /* Add special queues */ 1703 /* Add special queues */
1629 bp->num_queues += NON_ETH_CONTEXT_USE; 1704 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1705 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1630 1706
1631 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); 1707 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1632} 1708}
@@ -1653,20 +1729,18 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
1653 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() 1729 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1654 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). 1730 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1655 */ 1731 */
1656static int bnx2x_set_real_num_queues(struct bnx2x *bp) 1732static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1657{ 1733{
1658 int rc, tx, rx; 1734 int rc, tx, rx;
1659 1735
1660 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; 1736 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1661 rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE; 1737 rx = BNX2X_NUM_ETH_QUEUES(bp);
1662 1738
1663/* account for fcoe queue */ 1739/* account for fcoe queue */
1664#ifdef BCM_CNIC 1740 if (include_cnic && !NO_FCOE(bp)) {
1665 if (!NO_FCOE(bp)) { 1741 rx++;
1666 rx += FCOE_PRESENT; 1742 tx++;
1667 tx += FCOE_PRESENT;
1668 } 1743 }
1669#endif
1670 1744
1671 rc = netif_set_real_num_tx_queues(bp->dev, tx); 1745 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1672 if (rc) { 1746 if (rc) {
@@ -1859,14 +1933,26 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
1859 (bp)->state = BNX2X_STATE_ERROR; \ 1933 (bp)->state = BNX2X_STATE_ERROR; \
1860 goto label; \ 1934 goto label; \
1861 } while (0) 1935 } while (0)
1862#else 1936
1937#define LOAD_ERROR_EXIT_CNIC(bp, label) \
1938 do { \
1939 bp->cnic_loaded = false; \
1940 goto label; \
1941 } while (0)
1942#else /*BNX2X_STOP_ON_ERROR*/
1863#define LOAD_ERROR_EXIT(bp, label) \ 1943#define LOAD_ERROR_EXIT(bp, label) \
1864 do { \ 1944 do { \
1865 (bp)->state = BNX2X_STATE_ERROR; \ 1945 (bp)->state = BNX2X_STATE_ERROR; \
1866 (bp)->panic = 1; \ 1946 (bp)->panic = 1; \
1867 return -EBUSY; \ 1947 return -EBUSY; \
1868 } while (0) 1948 } while (0)
1869#endif 1949#define LOAD_ERROR_EXIT_CNIC(bp, label) \
1950 do { \
1951 bp->cnic_loaded = false; \
1952 (bp)->panic = 1; \
1953 return -EBUSY; \
1954 } while (0)
1955#endif /*BNX2X_STOP_ON_ERROR*/
1870 1956
1871bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err) 1957bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1872{ 1958{
@@ -1959,10 +2045,8 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1959 fp->max_cos = 1; 2045 fp->max_cos = 1;
1960 2046
1961 /* Init txdata pointers */ 2047 /* Init txdata pointers */
1962#ifdef BCM_CNIC
1963 if (IS_FCOE_FP(fp)) 2048 if (IS_FCOE_FP(fp))
1964 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; 2049 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
1965#endif
1966 if (IS_ETH_FP(fp)) 2050 if (IS_ETH_FP(fp))
1967 for_each_cos_in_tx_queue(fp, cos) 2051 for_each_cos_in_tx_queue(fp, cos)
1968 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * 2052 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
@@ -1980,11 +2064,95 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1980 else if (bp->flags & GRO_ENABLE_FLAG) 2064 else if (bp->flags & GRO_ENABLE_FLAG)
1981 fp->mode = TPA_MODE_GRO; 2065 fp->mode = TPA_MODE_GRO;
1982 2066
1983#ifdef BCM_CNIC
1984 /* We don't want TPA on an FCoE L2 ring */ 2067 /* We don't want TPA on an FCoE L2 ring */
1985 if (IS_FCOE_FP(fp)) 2068 if (IS_FCOE_FP(fp))
1986 fp->disable_tpa = 1; 2069 fp->disable_tpa = 1;
1987#endif 2070}
2071
2072int bnx2x_load_cnic(struct bnx2x *bp)
2073{
2074 int i, rc, port = BP_PORT(bp);
2075
2076 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2077
2078 mutex_init(&bp->cnic_mutex);
2079
2080 rc = bnx2x_alloc_mem_cnic(bp);
2081 if (rc) {
2082 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2083 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2084 }
2085
2086 rc = bnx2x_alloc_fp_mem_cnic(bp);
2087 if (rc) {
2088 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2089 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2090 }
2091
2092 /* Update the number of queues with the cnic queues */
2093 rc = bnx2x_set_real_num_queues(bp, 1);
2094 if (rc) {
2095 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2096 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2097 }
2098
2099 /* Add all CNIC NAPI objects */
2100 bnx2x_add_all_napi_cnic(bp);
2101 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2102 bnx2x_napi_enable_cnic(bp);
2103
2104 rc = bnx2x_init_hw_func_cnic(bp);
2105 if (rc)
2106 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2107
2108 bnx2x_nic_init_cnic(bp);
2109
2110 /* Enable Timer scan */
2111 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2112
2113 for_each_cnic_queue(bp, i) {
2114 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2115 if (rc) {
2116 BNX2X_ERR("Queue setup failed\n");
2117 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2118 }
2119 }
2120
2121 /* Initialize Rx filter. */
2122 netif_addr_lock_bh(bp->dev);
2123 bnx2x_set_rx_mode(bp->dev);
2124 netif_addr_unlock_bh(bp->dev);
2125
2126 /* re-read iscsi info */
2127 bnx2x_get_iscsi_info(bp);
2128 bnx2x_setup_cnic_irq_info(bp);
2129 bnx2x_setup_cnic_info(bp);
2130 bp->cnic_loaded = true;
2131 if (bp->state == BNX2X_STATE_OPEN)
2132 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2133
2134
2135 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2136
2137 return 0;
2138
2139#ifndef BNX2X_STOP_ON_ERROR
2140load_error_cnic2:
2141 /* Disable Timer scan */
2142 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2143
2144load_error_cnic1:
2145 bnx2x_napi_disable_cnic(bp);
2146 /* Update the number of queues without the cnic queues */
2147 rc = bnx2x_set_real_num_queues(bp, 0);
2148 if (rc)
2149 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2150load_error_cnic0:
2151 BNX2X_ERR("CNIC-related load failed\n");
2152 bnx2x_free_fp_mem_cnic(bp);
2153 bnx2x_free_mem_cnic(bp);
2154 return rc;
2155#endif /* ! BNX2X_STOP_ON_ERROR */
1988} 2156}
1989 2157
1990 2158
@@ -1995,6 +2163,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1995 u32 load_code; 2163 u32 load_code;
1996 int i, rc; 2164 int i, rc;
1997 2165
2166 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2167 DP(NETIF_MSG_IFUP,
2168 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2169
1998#ifdef BNX2X_STOP_ON_ERROR 2170#ifdef BNX2X_STOP_ON_ERROR
1999 if (unlikely(bp->panic)) { 2171 if (unlikely(bp->panic)) {
2000 BNX2X_ERR("Can't load NIC when there is panic\n"); 2172 BNX2X_ERR("Can't load NIC when there is panic\n");
@@ -2022,9 +2194,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2022 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); 2194 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2023 for_each_queue(bp, i) 2195 for_each_queue(bp, i)
2024 bnx2x_bz_fp(bp, i); 2196 bnx2x_bz_fp(bp, i);
2025 memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size * 2197 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2026 sizeof(struct bnx2x_fp_txdata)); 2198 bp->num_cnic_queues) *
2199 sizeof(struct bnx2x_fp_txdata));
2027 2200
2201 bp->fcoe_init = false;
2028 2202
2029 /* Set the receive queues buffer size */ 2203 /* Set the receive queues buffer size */
2030 bnx2x_set_rx_buf_size(bp); 2204 bnx2x_set_rx_buf_size(bp);
@@ -2034,9 +2208,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2034 2208
2035 /* As long as bnx2x_alloc_mem() may possibly update 2209 /* As long as bnx2x_alloc_mem() may possibly update
2036 * bp->num_queues, bnx2x_set_real_num_queues() should always 2210 * bp->num_queues, bnx2x_set_real_num_queues() should always
2037 * come after it. 2211 * come after it. At this stage cnic queues are not counted.
2038 */ 2212 */
2039 rc = bnx2x_set_real_num_queues(bp); 2213 rc = bnx2x_set_real_num_queues(bp, 0);
2040 if (rc) { 2214 if (rc) {
2041 BNX2X_ERR("Unable to set real_num_queues\n"); 2215 BNX2X_ERR("Unable to set real_num_queues\n");
2042 LOAD_ERROR_EXIT(bp, load_error0); 2216 LOAD_ERROR_EXIT(bp, load_error0);
@@ -2050,6 +2224,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2050 2224
2051 /* Add all NAPI objects */ 2225 /* Add all NAPI objects */
2052 bnx2x_add_all_napi(bp); 2226 bnx2x_add_all_napi(bp);
2227 DP(NETIF_MSG_IFUP, "napi added\n");
2053 bnx2x_napi_enable(bp); 2228 bnx2x_napi_enable(bp);
2054 2229
2055 /* set pf load just before approaching the MCP */ 2230 /* set pf load just before approaching the MCP */
@@ -2191,23 +2366,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2191 LOAD_ERROR_EXIT(bp, load_error3); 2366 LOAD_ERROR_EXIT(bp, load_error3);
2192 } 2367 }
2193 2368
2194#ifdef BCM_CNIC 2369 for_each_nondefault_eth_queue(bp, i) {
2195 /* Enable Timer scan */
2196 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2197#endif
2198
2199 for_each_nondefault_queue(bp, i) {
2200 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); 2370 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2201 if (rc) { 2371 if (rc) {
2202 BNX2X_ERR("Queue setup failed\n"); 2372 BNX2X_ERR("Queue setup failed\n");
2203 LOAD_ERROR_EXIT(bp, load_error4); 2373 LOAD_ERROR_EXIT(bp, load_error3);
2204 } 2374 }
2205 } 2375 }
2206 2376
2207 rc = bnx2x_init_rss_pf(bp); 2377 rc = bnx2x_init_rss_pf(bp);
2208 if (rc) { 2378 if (rc) {
2209 BNX2X_ERR("PF RSS init failed\n"); 2379 BNX2X_ERR("PF RSS init failed\n");
2210 LOAD_ERROR_EXIT(bp, load_error4); 2380 LOAD_ERROR_EXIT(bp, load_error3);
2211 } 2381 }
2212 2382
2213 /* Now when Clients are configured we are ready to work */ 2383 /* Now when Clients are configured we are ready to work */
@@ -2217,7 +2387,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2217 rc = bnx2x_set_eth_mac(bp, true); 2387 rc = bnx2x_set_eth_mac(bp, true);
2218 if (rc) { 2388 if (rc) {
2219 BNX2X_ERR("Setting Ethernet MAC failed\n"); 2389 BNX2X_ERR("Setting Ethernet MAC failed\n");
2220 LOAD_ERROR_EXIT(bp, load_error4); 2390 LOAD_ERROR_EXIT(bp, load_error3);
2221 } 2391 }
2222 2392
2223 if (bp->pending_max) { 2393 if (bp->pending_max) {
@@ -2264,14 +2434,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2264 /* start the timer */ 2434 /* start the timer */
2265 mod_timer(&bp->timer, jiffies + bp->current_interval); 2435 mod_timer(&bp->timer, jiffies + bp->current_interval);
2266 2436
2267#ifdef BCM_CNIC 2437 if (CNIC_ENABLED(bp))
2268 /* re-read iscsi info */ 2438 bnx2x_load_cnic(bp);
2269 bnx2x_get_iscsi_info(bp);
2270 bnx2x_setup_cnic_irq_info(bp);
2271 bnx2x_setup_cnic_info(bp);
2272 if (bp->state == BNX2X_STATE_OPEN)
2273 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2274#endif
2275 2439
2276 /* mark driver is loaded in shmem2 */ 2440 /* mark driver is loaded in shmem2 */
2277 if (SHMEM2_HAS(bp, drv_capabilities_flag)) { 2441 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
@@ -2293,14 +2457,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2293 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) 2457 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2294 bnx2x_dcbx_init(bp, false); 2458 bnx2x_dcbx_init(bp, false);
2295 2459
2460 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2461
2296 return 0; 2462 return 0;
2297 2463
2298#ifndef BNX2X_STOP_ON_ERROR 2464#ifndef BNX2X_STOP_ON_ERROR
2299load_error4:
2300#ifdef BCM_CNIC
2301 /* Disable Timer scan */
2302 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2303#endif
2304load_error3: 2465load_error3:
2305 bnx2x_int_disable_sync(bp, 1); 2466 bnx2x_int_disable_sync(bp, 1);
2306 2467
@@ -2338,6 +2499,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2338 int i; 2499 int i;
2339 bool global = false; 2500 bool global = false;
2340 2501
2502 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2503
2341 /* mark driver is unloaded in shmem2 */ 2504 /* mark driver is unloaded in shmem2 */
2342 if (SHMEM2_HAS(bp, drv_capabilities_flag)) { 2505 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2343 u32 val; 2506 u32 val;
@@ -2373,14 +2536,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2373 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 2536 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2374 smp_mb(); 2537 smp_mb();
2375 2538
2539 if (CNIC_LOADED(bp))
2540 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2541
2376 /* Stop Tx */ 2542 /* Stop Tx */
2377 bnx2x_tx_disable(bp); 2543 bnx2x_tx_disable(bp);
2378 netdev_reset_tc(bp->dev); 2544 netdev_reset_tc(bp->dev);
2379 2545
2380#ifdef BCM_CNIC
2381 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2382#endif
2383
2384 bp->rx_mode = BNX2X_RX_MODE_NONE; 2546 bp->rx_mode = BNX2X_RX_MODE_NONE;
2385 2547
2386 del_timer_sync(&bp->timer); 2548 del_timer_sync(&bp->timer);
@@ -2414,7 +2576,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2414 bnx2x_netif_stop(bp, 1); 2576 bnx2x_netif_stop(bp, 1);
2415 /* Delete all NAPI objects */ 2577 /* Delete all NAPI objects */
2416 bnx2x_del_all_napi(bp); 2578 bnx2x_del_all_napi(bp);
2417 2579 if (CNIC_LOADED(bp))
2580 bnx2x_del_all_napi_cnic(bp);
2418 /* Release IRQs */ 2581 /* Release IRQs */
2419 bnx2x_free_irq(bp); 2582 bnx2x_free_irq(bp);
2420 2583
@@ -2435,12 +2598,19 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2435 2598
2436 /* Free SKBs, SGEs, TPA pool and driver internals */ 2599 /* Free SKBs, SGEs, TPA pool and driver internals */
2437 bnx2x_free_skbs(bp); 2600 bnx2x_free_skbs(bp);
2601 if (CNIC_LOADED(bp))
2602 bnx2x_free_skbs_cnic(bp);
2438 for_each_rx_queue(bp, i) 2603 for_each_rx_queue(bp, i)
2439 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 2604 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2440 2605
2606 if (CNIC_LOADED(bp)) {
2607 bnx2x_free_fp_mem_cnic(bp);
2608 bnx2x_free_mem_cnic(bp);
2609 }
2441 bnx2x_free_mem(bp); 2610 bnx2x_free_mem(bp);
2442 2611
2443 bp->state = BNX2X_STATE_CLOSED; 2612 bp->state = BNX2X_STATE_CLOSED;
2613 bp->cnic_loaded = false;
2444 2614
2445 /* Check if there are pending parity attentions. If there are - set 2615 /* Check if there are pending parity attentions. If there are - set
2446 * RECOVERY_IN_PROGRESS. 2616 * RECOVERY_IN_PROGRESS.
@@ -2460,6 +2630,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2460 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp))) 2630 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2461 bnx2x_disable_close_the_gate(bp); 2631 bnx2x_disable_close_the_gate(bp);
2462 2632
2633 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2634
2463 return 0; 2635 return 0;
2464} 2636}
2465 2637
@@ -2550,7 +2722,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
2550 2722
2551 /* Fall out from the NAPI loop if needed */ 2723 /* Fall out from the NAPI loop if needed */
2552 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 2724 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2553#ifdef BCM_CNIC 2725
2554 /* No need to update SB for FCoE L2 ring as long as 2726 /* No need to update SB for FCoE L2 ring as long as
2555 * it's connected to the default SB and the SB 2727 * it's connected to the default SB and the SB
2556 * has been updated when NAPI was scheduled. 2728 * has been updated when NAPI was scheduled.
@@ -2559,8 +2731,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
2559 napi_complete(napi); 2731 napi_complete(napi);
2560 break; 2732 break;
2561 } 2733 }
2562#endif
2563
2564 bnx2x_update_fpsb_idx(fp); 2734 bnx2x_update_fpsb_idx(fp);
2565 /* bnx2x_has_rx_work() reads the status block, 2735 /* bnx2x_has_rx_work() reads the status block,
2566 * thus we need to ensure that status block indices 2736 * thus we need to ensure that status block indices
@@ -2940,7 +3110,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2940 txq_index = skb_get_queue_mapping(skb); 3110 txq_index = skb_get_queue_mapping(skb);
2941 txq = netdev_get_tx_queue(dev, txq_index); 3111 txq = netdev_get_tx_queue(dev, txq_index);
2942 3112
2943 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); 3113 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
2944 3114
2945 txdata = &bp->bnx2x_txq[txq_index]; 3115 txdata = &bp->bnx2x_txq[txq_index];
2946 3116
@@ -3339,13 +3509,11 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3339 return -EINVAL; 3509 return -EINVAL;
3340 } 3510 }
3341 3511
3342#ifdef BCM_CNIC
3343 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) && 3512 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3344 !is_zero_ether_addr(addr->sa_data)) { 3513 !is_zero_ether_addr(addr->sa_data)) {
3345 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n"); 3514 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3346 return -EINVAL; 3515 return -EINVAL;
3347 } 3516 }
3348#endif
3349 3517
3350 if (netif_running(dev)) { 3518 if (netif_running(dev)) {
3351 rc = bnx2x_set_eth_mac(bp, false); 3519 rc = bnx2x_set_eth_mac(bp, false);
@@ -3369,13 +3537,11 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3369 u8 cos; 3537 u8 cos;
3370 3538
3371 /* Common */ 3539 /* Common */
3372#ifdef BCM_CNIC 3540
3373 if (IS_FCOE_IDX(fp_index)) { 3541 if (IS_FCOE_IDX(fp_index)) {
3374 memset(sb, 0, sizeof(union host_hc_status_block)); 3542 memset(sb, 0, sizeof(union host_hc_status_block));
3375 fp->status_blk_mapping = 0; 3543 fp->status_blk_mapping = 0;
3376
3377 } else { 3544 } else {
3378#endif
3379 /* status blocks */ 3545 /* status blocks */
3380 if (!CHIP_IS_E1x(bp)) 3546 if (!CHIP_IS_E1x(bp))
3381 BNX2X_PCI_FREE(sb->e2_sb, 3547 BNX2X_PCI_FREE(sb->e2_sb,
@@ -3387,9 +3553,8 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3387 bnx2x_fp(bp, fp_index, 3553 bnx2x_fp(bp, fp_index,
3388 status_blk_mapping), 3554 status_blk_mapping),
3389 sizeof(struct host_hc_status_block_e1x)); 3555 sizeof(struct host_hc_status_block_e1x));
3390#ifdef BCM_CNIC
3391 } 3556 }
3392#endif 3557
3393 /* Rx */ 3558 /* Rx */
3394 if (!skip_rx_queue(bp, fp_index)) { 3559 if (!skip_rx_queue(bp, fp_index)) {
3395 bnx2x_free_rx_bds(fp); 3560 bnx2x_free_rx_bds(fp);
@@ -3431,10 +3596,17 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3431 /* end of fastpath */ 3596 /* end of fastpath */
3432} 3597}
3433 3598
3599void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3600{
3601 int i;
3602 for_each_cnic_queue(bp, i)
3603 bnx2x_free_fp_mem_at(bp, i);
3604}
3605
3434void bnx2x_free_fp_mem(struct bnx2x *bp) 3606void bnx2x_free_fp_mem(struct bnx2x *bp)
3435{ 3607{
3436 int i; 3608 int i;
3437 for_each_queue(bp, i) 3609 for_each_eth_queue(bp, i)
3438 bnx2x_free_fp_mem_at(bp, i); 3610 bnx2x_free_fp_mem_at(bp, i);
3439} 3611}
3440 3612
@@ -3519,14 +3691,11 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3519 u8 cos; 3691 u8 cos;
3520 int rx_ring_size = 0; 3692 int rx_ring_size = 0;
3521 3693
3522#ifdef BCM_CNIC
3523 if (!bp->rx_ring_size && 3694 if (!bp->rx_ring_size &&
3524 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { 3695 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3525 rx_ring_size = MIN_RX_SIZE_NONTPA; 3696 rx_ring_size = MIN_RX_SIZE_NONTPA;
3526 bp->rx_ring_size = rx_ring_size; 3697 bp->rx_ring_size = rx_ring_size;
3527 } else 3698 } else if (!bp->rx_ring_size) {
3528#endif
3529 if (!bp->rx_ring_size) {
3530 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); 3699 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3531 3700
3532 if (CHIP_IS_E3(bp)) { 3701 if (CHIP_IS_E3(bp)) {
@@ -3550,9 +3719,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3550 3719
3551 /* Common */ 3720 /* Common */
3552 sb = &bnx2x_fp(bp, index, status_blk); 3721 sb = &bnx2x_fp(bp, index, status_blk);
3553#ifdef BCM_CNIC 3722
3554 if (!IS_FCOE_IDX(index)) { 3723 if (!IS_FCOE_IDX(index)) {
3555#endif
3556 /* status blocks */ 3724 /* status blocks */
3557 if (!CHIP_IS_E1x(bp)) 3725 if (!CHIP_IS_E1x(bp))
3558 BNX2X_PCI_ALLOC(sb->e2_sb, 3726 BNX2X_PCI_ALLOC(sb->e2_sb,
@@ -3562,9 +3730,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3562 BNX2X_PCI_ALLOC(sb->e1x_sb, 3730 BNX2X_PCI_ALLOC(sb->e1x_sb,
3563 &bnx2x_fp(bp, index, status_blk_mapping), 3731 &bnx2x_fp(bp, index, status_blk_mapping),
3564 sizeof(struct host_hc_status_block_e1x)); 3732 sizeof(struct host_hc_status_block_e1x));
3565#ifdef BCM_CNIC
3566 } 3733 }
3567#endif
3568 3734
3569 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to 3735 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3570 * set shortcuts for it. 3736 * set shortcuts for it.
@@ -3641,31 +3807,31 @@ alloc_mem_err:
3641 return 0; 3807 return 0;
3642} 3808}
3643 3809
3810int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
3811{
3812 if (!NO_FCOE(bp))
3813 /* FCoE */
3814 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3815 /* we will fail load process instead of mark
3816 * NO_FCOE_FLAG
3817 */
3818 return -ENOMEM;
3819
3820 return 0;
3821}
3822
3644int bnx2x_alloc_fp_mem(struct bnx2x *bp) 3823int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3645{ 3824{
3646 int i; 3825 int i;
3647 3826
3648 /** 3827 /* 1. Allocate FP for leading - fatal if error
3649 * 1. Allocate FP for leading - fatal if error 3828 * 2. Allocate RSS - fix number of queues if error
3650 * 2. {CNIC} Allocate FCoE FP - fatal if error
3651 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3652 * 4. Allocate RSS - fix number of queues if error
3653 */ 3829 */
3654 3830
3655 /* leading */ 3831 /* leading */
3656 if (bnx2x_alloc_fp_mem_at(bp, 0)) 3832 if (bnx2x_alloc_fp_mem_at(bp, 0))
3657 return -ENOMEM; 3833 return -ENOMEM;
3658 3834
3659#ifdef BCM_CNIC
3660 if (!NO_FCOE(bp))
3661 /* FCoE */
3662 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3663 /* we will fail load process instead of mark
3664 * NO_FCOE_FLAG
3665 */
3666 return -ENOMEM;
3667#endif
3668
3669 /* RSS */ 3835 /* RSS */
3670 for_each_nondefault_eth_queue(bp, i) 3836 for_each_nondefault_eth_queue(bp, i)
3671 if (bnx2x_alloc_fp_mem_at(bp, i)) 3837 if (bnx2x_alloc_fp_mem_at(bp, i))
@@ -3676,17 +3842,17 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3676 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; 3842 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3677 3843
3678 WARN_ON(delta < 0); 3844 WARN_ON(delta < 0);
3679#ifdef BCM_CNIC 3845 if (CNIC_SUPPORT(bp))
3680 /** 3846 /* move non eth FPs next to last eth FP
3681 * move non eth FPs next to last eth FP 3847 * must be done in that order
3682 * must be done in that order 3848 * FCOE_IDX < FWD_IDX < OOO_IDX
3683 * FCOE_IDX < FWD_IDX < OOO_IDX 3849 */
3684 */
3685 3850
3686 /* move FCoE fp even NO_FCOE_FLAG is on */ 3851 /* move FCoE fp even NO_FCOE_FLAG is on */
3687 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); 3852 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
3688#endif 3853 bp->num_ethernet_queues -= delta;
3689 bp->num_queues -= delta; 3854 bp->num_queues = bp->num_ethernet_queues +
3855 bp->num_cnic_queues;
3690 BNX2X_ERR("Adjusted num of queues from %d to %d\n", 3856 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3691 bp->num_queues + delta, bp->num_queues); 3857 bp->num_queues + delta, bp->num_queues);
3692 } 3858 }
@@ -3711,7 +3877,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3711 struct msix_entry *tbl; 3877 struct msix_entry *tbl;
3712 struct bnx2x_ilt *ilt; 3878 struct bnx2x_ilt *ilt;
3713 int msix_table_size = 0; 3879 int msix_table_size = 0;
3714 int fp_array_size; 3880 int fp_array_size, txq_array_size;
3715 int i; 3881 int i;
3716 3882
3717 /* 3883 /*
@@ -3721,7 +3887,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3721 msix_table_size = bp->igu_sb_cnt + 1; 3887 msix_table_size = bp->igu_sb_cnt + 1;
3722 3888
3723 /* fp array: RSS plus CNIC related L2 queues */ 3889 /* fp array: RSS plus CNIC related L2 queues */
3724 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE; 3890 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
3725 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size); 3891 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3726 3892
3727 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL); 3893 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
@@ -3750,12 +3916,12 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3750 goto alloc_err; 3916 goto alloc_err;
3751 3917
3752 /* Allocate memory for the transmission queues array */ 3918 /* Allocate memory for the transmission queues array */
3753 bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS; 3919 txq_array_size =
3754#ifdef BCM_CNIC 3920 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
3755 bp->bnx2x_txq_size++; 3921 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
3756#endif 3922
3757 bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size, 3923 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
3758 sizeof(struct bnx2x_fp_txdata), GFP_KERNEL); 3924 GFP_KERNEL);
3759 if (!bp->bnx2x_txq) 3925 if (!bp->bnx2x_txq)
3760 goto alloc_err; 3926 goto alloc_err;
3761 3927
@@ -3838,7 +4004,7 @@ int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3838 return LINK_CONFIG_IDX(sel_phy_idx); 4004 return LINK_CONFIG_IDX(sel_phy_idx);
3839} 4005}
3840 4006
3841#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 4007#ifdef NETDEV_FCOE_WWNN
3842int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) 4008int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3843{ 4009{
3844 struct bnx2x *bp = netdev_priv(dev); 4010 struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 9c5ea6c5b4c7..ad280740b134 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -238,7 +238,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
238 * @dev_instance: private instance 238 * @dev_instance: private instance
239 */ 239 */
240irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); 240irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
241#ifdef BCM_CNIC
242 241
243/** 242/**
244 * bnx2x_cnic_notify - send command to cnic driver 243 * bnx2x_cnic_notify - send command to cnic driver
@@ -262,8 +261,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
262 */ 261 */
263void bnx2x_setup_cnic_info(struct bnx2x *bp); 262void bnx2x_setup_cnic_info(struct bnx2x *bp);
264 263
265#endif
266
267/** 264/**
268 * bnx2x_int_enable - enable HW interrupts. 265 * bnx2x_int_enable - enable HW interrupts.
269 * 266 *
@@ -283,7 +280,7 @@ void bnx2x_int_enable(struct bnx2x *bp);
283void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); 280void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
284 281
285/** 282/**
286 * bnx2x_nic_init - init driver internals. 283 * bnx2x_nic_init_cnic - init driver internals for cnic.
287 * 284 *
288 * @bp: driver handle 285 * @bp: driver handle
289 * @load_code: COMMON, PORT or FUNCTION 286 * @load_code: COMMON, PORT or FUNCTION
@@ -293,9 +290,26 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
293 * - status blocks 290 * - status blocks
294 * - etc. 291 * - etc.
295 */ 292 */
296void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); 293void bnx2x_nic_init_cnic(struct bnx2x *bp);
297 294
298/** 295/**
296 * bnx2x_nic_init - init driver internals.
297 *
298 * @bp: driver handle
299 *
300 * Initializes:
301 * - rings
302 * - status blocks
303 * - etc.
304 */
305void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
306/**
307 * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
308 *
309 * @bp: driver handle
310 */
311int bnx2x_alloc_mem_cnic(struct bnx2x *bp);
312/**
299 * bnx2x_alloc_mem - allocate driver's memory. 313 * bnx2x_alloc_mem - allocate driver's memory.
300 * 314 *
301 * @bp: driver handle 315 * @bp: driver handle
@@ -303,6 +317,12 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
303int bnx2x_alloc_mem(struct bnx2x *bp); 317int bnx2x_alloc_mem(struct bnx2x *bp);
304 318
305/** 319/**
320 * bnx2x_free_mem_cnic - release driver's memory for cnic.
321 *
322 * @bp: driver handle
323 */
324void bnx2x_free_mem_cnic(struct bnx2x *bp);
325/**
306 * bnx2x_free_mem - release driver's memory. 326 * bnx2x_free_mem - release driver's memory.
307 * 327 *
308 * @bp: driver handle 328 * @bp: driver handle
@@ -407,6 +427,7 @@ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
407void bnx2x_set_reset_in_progress(struct bnx2x *bp); 427void bnx2x_set_reset_in_progress(struct bnx2x *bp);
408void bnx2x_set_reset_global(struct bnx2x *bp); 428void bnx2x_set_reset_global(struct bnx2x *bp);
409void bnx2x_disable_close_the_gate(struct bnx2x *bp); 429void bnx2x_disable_close_the_gate(struct bnx2x *bp);
430int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
410 431
411/** 432/**
412 * bnx2x_sp_event - handle ramrods completion. 433 * bnx2x_sp_event - handle ramrods completion.
@@ -424,6 +445,14 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
424void bnx2x_ilt_set_info(struct bnx2x *bp); 445void bnx2x_ilt_set_info(struct bnx2x *bp);
425 446
426/** 447/**
448 * bnx2x_ilt_set_cnic_info - prepare ILT configurations for SRC
449 * and TM.
450 *
451 * @bp: driver handle
452 */
453void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);
454
455/**
427 * bnx2x_dcbx_init - initialize dcbx protocol. 456 * bnx2x_dcbx_init - initialize dcbx protocol.
428 * 457 *
429 * @bp: driver handle 458 * @bp: driver handle
@@ -491,12 +520,17 @@ int bnx2x_resume(struct pci_dev *pdev);
491/* Release IRQ vectors */ 520/* Release IRQ vectors */
492void bnx2x_free_irq(struct bnx2x *bp); 521void bnx2x_free_irq(struct bnx2x *bp);
493 522
523void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
494void bnx2x_free_fp_mem(struct bnx2x *bp); 524void bnx2x_free_fp_mem(struct bnx2x *bp);
525int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
495int bnx2x_alloc_fp_mem(struct bnx2x *bp); 526int bnx2x_alloc_fp_mem(struct bnx2x *bp);
496void bnx2x_init_rx_rings(struct bnx2x *bp); 527void bnx2x_init_rx_rings(struct bnx2x *bp);
528void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
529void bnx2x_free_skbs_cnic(struct bnx2x *bp);
497void bnx2x_free_skbs(struct bnx2x *bp); 530void bnx2x_free_skbs(struct bnx2x *bp);
498void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); 531void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
499void bnx2x_netif_start(struct bnx2x *bp); 532void bnx2x_netif_start(struct bnx2x *bp);
533int bnx2x_load_cnic(struct bnx2x *bp);
500 534
501/** 535/**
502 * bnx2x_enable_msix - set msix configuration. 536 * bnx2x_enable_msix - set msix configuration.
@@ -547,7 +581,7 @@ void bnx2x_free_mem_bp(struct bnx2x *bp);
547 */ 581 */
548int bnx2x_change_mtu(struct net_device *dev, int new_mtu); 582int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
549 583
550#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 584#ifdef NETDEV_FCOE_WWNN
551/** 585/**
552 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port 586 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
553 * 587 *
@@ -793,23 +827,39 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
793 sge->addr_lo = 0; 827 sge->addr_lo = 0;
794} 828}
795 829
796static inline void bnx2x_add_all_napi(struct bnx2x *bp) 830static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
797{ 831{
798 int i; 832 int i;
799 833
800 bp->num_napi_queues = bp->num_queues; 834 /* Add NAPI objects */
835 for_each_rx_queue_cnic(bp, i)
836 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
837 bnx2x_poll, BNX2X_NAPI_WEIGHT);
838}
839
840static inline void bnx2x_add_all_napi(struct bnx2x *bp)
841{
842 int i;
801 843
802 /* Add NAPI objects */ 844 /* Add NAPI objects */
803 for_each_rx_queue(bp, i) 845 for_each_eth_queue(bp, i)
804 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 846 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
805 bnx2x_poll, BNX2X_NAPI_WEIGHT); 847 bnx2x_poll, BNX2X_NAPI_WEIGHT);
806} 848}
807 849
850static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
851{
852 int i;
853
854 for_each_rx_queue_cnic(bp, i)
855 netif_napi_del(&bnx2x_fp(bp, i, napi));
856}
857
808static inline void bnx2x_del_all_napi(struct bnx2x *bp) 858static inline void bnx2x_del_all_napi(struct bnx2x *bp)
809{ 859{
810 int i; 860 int i;
811 861
812 for_each_rx_queue(bp, i) 862 for_each_eth_queue(bp, i)
813 netif_napi_del(&bnx2x_fp(bp, i, napi)); 863 netif_napi_del(&bnx2x_fp(bp, i, napi));
814} 864}
815 865
@@ -979,11 +1029,9 @@ static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
979{ 1029{
980 struct bnx2x *bp = fp->bp; 1030 struct bnx2x *bp = fp->bp;
981 if (!CHIP_IS_E1x(bp)) { 1031 if (!CHIP_IS_E1x(bp)) {
982#ifdef BCM_CNIC
983 /* there are special statistics counters for FCoE 136..140 */ 1032 /* there are special statistics counters for FCoE 136..140 */
984 if (IS_FCOE_FP(fp)) 1033 if (IS_FCOE_FP(fp))
985 return bp->cnic_base_cl_id + (bp->pf_num >> 1); 1034 return bp->cnic_base_cl_id + (bp->pf_num >> 1);
986#endif
987 return fp->cl_id; 1035 return fp->cl_id;
988 } 1036 }
989 return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x; 1037 return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
@@ -1102,7 +1150,6 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
1102 txdata->cid, txdata->txq_index); 1150 txdata->cid, txdata->txq_index);
1103} 1151}
1104 1152
1105#ifdef BCM_CNIC
1106static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) 1153static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
1107{ 1154{
1108 return bp->cnic_base_cl_id + cl_idx + 1155 return bp->cnic_base_cl_id + cl_idx +
@@ -1162,7 +1209,6 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
1162 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, 1209 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
1163 fp->igu_sb_id); 1210 fp->igu_sb_id);
1164} 1211}
1165#endif
1166 1212
1167static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, 1213static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
1168 struct bnx2x_fp_txdata *txdata) 1214 struct bnx2x_fp_txdata *txdata)
@@ -1280,7 +1326,7 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
1280 */ 1326 */
1281 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; 1327 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
1282} 1328}
1283#ifdef BCM_CNIC 1329
1284/** 1330/**
1285 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info. 1331 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
1286 * 1332 *
@@ -1288,7 +1334,6 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
1288 * 1334 *
1289 */ 1335 */
1290void bnx2x_get_iscsi_info(struct bnx2x *bp); 1336void bnx2x_get_iscsi_info(struct bnx2x *bp);
1291#endif
1292 1337
1293/** 1338/**
1294 * bnx2x_link_sync_notify - send notification to other functions. 1339 * bnx2x_link_sync_notify - send notification to other functions.
@@ -1340,13 +1385,11 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
1340 1385
1341static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr) 1386static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
1342{ 1387{
1343 if (is_valid_ether_addr(addr)) 1388 if (is_valid_ether_addr(addr) ||
1389 (is_zero_ether_addr(addr) &&
1390 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))))
1344 return true; 1391 return true;
1345#ifdef BCM_CNIC 1392
1346 if (is_zero_ether_addr(addr) &&
1347 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
1348 return true;
1349#endif
1350 return false; 1393 return false;
1351} 1394}
1352 1395
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 2245c3895409..cba4a16ab86a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1908,10 +1908,10 @@ static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
1908 /* first the HW mac address */ 1908 /* first the HW mac address */
1909 memcpy(perm_addr, netdev->dev_addr, netdev->addr_len); 1909 memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
1910 1910
1911#ifdef BCM_CNIC 1911 if (CNIC_LOADED(bp))
1912 /* second SAN address */ 1912 /* second SAN address */
1913 memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len); 1913 memcpy(perm_addr+netdev->addr_len, bp->fip_mac,
1914#endif 1914 netdev->addr_len);
1915} 1915}
1916 1916
1917static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, 1917static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index c65295dded39..ec3f9e5187df 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2901,7 +2901,9 @@ static void bnx2x_get_channels(struct net_device *dev,
2901static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) 2901static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
2902{ 2902{
2903 bnx2x_disable_msi(bp); 2903 bnx2x_disable_msi(bp);
2904 BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; 2904 bp->num_ethernet_queues = num_rss;
2905 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
2906 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
2905 bnx2x_set_int_mode(bp); 2907 bnx2x_set_int_mode(bp);
2906} 2908}
2907 2909
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index fe66d902dc62..d755acfe7a40 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -648,15 +648,25 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
648 return rc; 648 return rc;
649} 649}
650 650
651static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop)
652{
653 int rc = 0;
654
655 if (CONFIGURE_NIC_MODE(bp))
656 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
657 if (!rc)
658 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
659
660 return rc;
661}
662
651static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop) 663static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
652{ 664{
653 int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop); 665 int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
654 if (!rc) 666 if (!rc)
655 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop); 667 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
656 if (!rc) 668 if (!rc && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
657 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop); 669 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
658 if (!rc)
659 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
660 670
661 return rc; 671 return rc;
662} 672}
@@ -781,12 +791,19 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
781 bnx2x_ilt_client_init_op(bp, ilt_cli, initop); 791 bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
782} 792}
783 793
794static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop)
795{
796 if (CONFIGURE_NIC_MODE(bp))
797 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
798 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
799}
800
784static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop) 801static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
785{ 802{
786 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop); 803 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
787 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop); 804 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
788 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop); 805 if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
789 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop); 806 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
790} 807}
791 808
792static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, 809static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
@@ -890,7 +907,6 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
890/**************************************************************************** 907/****************************************************************************
891* SRC initializations 908* SRC initializations
892****************************************************************************/ 909****************************************************************************/
893#ifdef BCM_CNIC
894/* called during init func stage */ 910/* called during init func stage */
895static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, 911static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
896 dma_addr_t t2_mapping, int src_cid_count) 912 dma_addr_t t2_mapping, int src_cid_count)
@@ -915,5 +931,4 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
915 U64_HI((u64)t2_mapping + 931 U64_HI((u64)t2_mapping +
916 (src_cid_count-1) * sizeof(struct src_ent))); 932 (src_cid_count-1) * sizeof(struct src_ent)));
917} 933}
918#endif
919#endif /* BNX2X_INIT_OPS_H */ 934#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index e2e45ee5df33..a2b94650c271 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -11998,7 +11998,7 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
11998 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); 11998 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
11999} 11999}
12000 12000
12001static void bnx2x_set_rx_filter(struct link_params *params, u8 en) 12001void bnx2x_set_rx_filter(struct link_params *params, u8 en)
12002{ 12002{
12003 struct bnx2x *bp = params->bp; 12003 struct bnx2x *bp = params->bp;
12004 u8 val = en * 0x1F; 12004 u8 val = en * 0x1F;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 9165b89a4b19..ba981ced628b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -432,7 +432,8 @@ int bnx2x_phy_probe(struct link_params *params);
432u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, 432u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
433 u32 shmem2_base, u8 port); 433 u32 shmem2_base, u8 port);
434 434
435 435/* Open / close the gate between the NIG and the BRB */
436void bnx2x_set_rx_filter(struct link_params *params, u8 en);
436 437
437/* DCBX structs */ 438/* DCBX structs */
438 439
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d5648fc666bd..0546cf4f762e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -791,10 +791,9 @@ void bnx2x_panic_dump(struct bnx2x *bp)
791 791
792 /* host sb data */ 792 /* host sb data */
793 793
794#ifdef BCM_CNIC
795 if (IS_FCOE_FP(fp)) 794 if (IS_FCOE_FP(fp))
796 continue; 795 continue;
797#endif 796
798 BNX2X_ERR(" run indexes ("); 797 BNX2X_ERR(" run indexes (");
799 for (j = 0; j < HC_SB_MAX_SM; j++) 798 for (j = 0; j < HC_SB_MAX_SM; j++)
800 pr_cont("0x%x%s", 799 pr_cont("0x%x%s",
@@ -859,7 +858,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
859#ifdef BNX2X_STOP_ON_ERROR 858#ifdef BNX2X_STOP_ON_ERROR
860 /* Rings */ 859 /* Rings */
861 /* Rx */ 860 /* Rx */
862 for_each_rx_queue(bp, i) { 861 for_each_valid_rx_queue(bp, i) {
863 struct bnx2x_fastpath *fp = &bp->fp[i]; 862 struct bnx2x_fastpath *fp = &bp->fp[i];
864 863
865 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); 864 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -893,7 +892,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
893 } 892 }
894 893
895 /* Tx */ 894 /* Tx */
896 for_each_tx_queue(bp, i) { 895 for_each_valid_tx_queue(bp, i) {
897 struct bnx2x_fastpath *fp = &bp->fp[i]; 896 struct bnx2x_fastpath *fp = &bp->fp[i];
898 for_each_cos_in_tx_queue(fp, cos) { 897 for_each_cos_in_tx_queue(fp, cos) {
899 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; 898 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
@@ -1504,9 +1503,8 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1504 if (msix) { 1503 if (msix) {
1505 synchronize_irq(bp->msix_table[0].vector); 1504 synchronize_irq(bp->msix_table[0].vector);
1506 offset = 1; 1505 offset = 1;
1507#ifdef BCM_CNIC 1506 if (CNIC_SUPPORT(bp))
1508 offset++; 1507 offset++;
1509#endif
1510 for_each_eth_queue(bp, i) 1508 for_each_eth_queue(bp, i)
1511 synchronize_irq(bp->msix_table[offset++].vector); 1509 synchronize_irq(bp->msix_table[offset++].vector);
1512 } else 1510 } else
@@ -1588,9 +1586,8 @@ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1588 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); 1586 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1589} 1587}
1590 1588
1591#ifdef BCM_CNIC
1592static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); 1589static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1593#endif 1590
1594 1591
1595void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) 1592void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1596{ 1593{
@@ -1720,7 +1717,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1720 for_each_eth_queue(bp, i) { 1717 for_each_eth_queue(bp, i) {
1721 struct bnx2x_fastpath *fp = &bp->fp[i]; 1718 struct bnx2x_fastpath *fp = &bp->fp[i];
1722 1719
1723 mask = 0x2 << (fp->index + CNIC_PRESENT); 1720 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1724 if (status & mask) { 1721 if (status & mask) {
1725 /* Handle Rx or Tx according to SB id */ 1722 /* Handle Rx or Tx according to SB id */
1726 prefetch(fp->rx_cons_sb); 1723 prefetch(fp->rx_cons_sb);
@@ -1732,22 +1729,23 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1732 } 1729 }
1733 } 1730 }
1734 1731
1735#ifdef BCM_CNIC 1732 if (CNIC_SUPPORT(bp)) {
1736 mask = 0x2; 1733 mask = 0x2;
1737 if (status & (mask | 0x1)) { 1734 if (status & (mask | 0x1)) {
1738 struct cnic_ops *c_ops = NULL; 1735 struct cnic_ops *c_ops = NULL;
1739 1736
1740 if (likely(bp->state == BNX2X_STATE_OPEN)) { 1737 if (likely(bp->state == BNX2X_STATE_OPEN)) {
1741 rcu_read_lock(); 1738 rcu_read_lock();
1742 c_ops = rcu_dereference(bp->cnic_ops); 1739 c_ops = rcu_dereference(bp->cnic_ops);
1743 if (c_ops) 1740 if (c_ops)
1744 c_ops->cnic_handler(bp->cnic_data, NULL); 1741 c_ops->cnic_handler(bp->cnic_data,
1745 rcu_read_unlock(); 1742 NULL);
1746 } 1743 rcu_read_unlock();
1744 }
1747 1745
1748 status &= ~mask; 1746 status &= ~mask;
1747 }
1749 } 1748 }
1750#endif
1751 1749
1752 if (unlikely(status & 0x1)) { 1750 if (unlikely(status & 0x1)) {
1753 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 1751 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
@@ -3075,11 +3073,13 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3075 3073
3076static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) 3074static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3077{ 3075{
3078#ifdef BCM_CNIC
3079 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3076 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3080 struct fcoe_stats_info *fcoe_stat = 3077 struct fcoe_stats_info *fcoe_stat =
3081 &bp->slowpath->drv_info_to_mcp.fcoe_stat; 3078 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3082 3079
3080 if (!CNIC_LOADED(bp))
3081 return;
3082
3083 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT, 3083 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
3084 bp->fip_mac, ETH_ALEN); 3084 bp->fip_mac, ETH_ALEN);
3085 3085
@@ -3162,16 +3162,17 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3162 3162
3163 /* ask L5 driver to add data to the struct */ 3163 /* ask L5 driver to add data to the struct */
3164 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); 3164 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3165#endif
3166} 3165}
3167 3166
3168static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) 3167static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3169{ 3168{
3170#ifdef BCM_CNIC
3171 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3169 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3172 struct iscsi_stats_info *iscsi_stat = 3170 struct iscsi_stats_info *iscsi_stat =
3173 &bp->slowpath->drv_info_to_mcp.iscsi_stat; 3171 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3174 3172
3173 if (!CNIC_LOADED(bp))
3174 return;
3175
3175 memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT, 3176 memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
3176 bp->cnic_eth_dev.iscsi_mac, ETH_ALEN); 3177 bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
3177 3178
@@ -3180,7 +3181,6 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3180 3181
3181 /* ask L5 driver to add data to the struct */ 3182 /* ask L5 driver to add data to the struct */
3182 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); 3183 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3183#endif
3184} 3184}
3185 3185
3186/* called due to MCP event (on pmf): 3186/* called due to MCP event (on pmf):
@@ -4572,7 +4572,6 @@ static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
4572 mmiowb(); /* keep prod updates ordered */ 4572 mmiowb(); /* keep prod updates ordered */
4573} 4573}
4574 4574
4575#ifdef BCM_CNIC
4576static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, 4575static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4577 union event_ring_elem *elem) 4576 union event_ring_elem *elem)
4578{ 4577{
@@ -4594,7 +4593,6 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4594 bnx2x_cnic_cfc_comp(bp, cid, err); 4593 bnx2x_cnic_cfc_comp(bp, cid, err);
4595 return 0; 4594 return 0;
4596} 4595}
4597#endif
4598 4596
4599static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) 4597static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4600{ 4598{
@@ -4635,11 +4633,9 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4635 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 4633 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
4636 case BNX2X_FILTER_MAC_PENDING: 4634 case BNX2X_FILTER_MAC_PENDING:
4637 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 4635 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
4638#ifdef BCM_CNIC 4636 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
4639 if (cid == BNX2X_ISCSI_ETH_CID(bp))
4640 vlan_mac_obj = &bp->iscsi_l2_mac_obj; 4637 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
4641 else 4638 else
4642#endif
4643 vlan_mac_obj = &bp->sp_objs[cid].mac_obj; 4639 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
4644 4640
4645 break; 4641 break;
@@ -4665,9 +4661,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4665 4661
4666} 4662}
4667 4663
4668#ifdef BCM_CNIC
4669static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 4664static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
4670#endif
4671 4665
4672static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) 4666static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4673{ 4667{
@@ -4678,14 +4672,12 @@ static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4678 /* Send rx_mode command again if was requested */ 4672 /* Send rx_mode command again if was requested */
4679 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) 4673 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
4680 bnx2x_set_storm_rx_mode(bp); 4674 bnx2x_set_storm_rx_mode(bp);
4681#ifdef BCM_CNIC
4682 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, 4675 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
4683 &bp->sp_state)) 4676 &bp->sp_state))
4684 bnx2x_set_iscsi_eth_rx_mode(bp, true); 4677 bnx2x_set_iscsi_eth_rx_mode(bp, true);
4685 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, 4678 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
4686 &bp->sp_state)) 4679 &bp->sp_state))
4687 bnx2x_set_iscsi_eth_rx_mode(bp, false); 4680 bnx2x_set_iscsi_eth_rx_mode(bp, false);
4688#endif
4689 4681
4690 netif_addr_unlock_bh(bp->dev); 4682 netif_addr_unlock_bh(bp->dev);
4691} 4683}
@@ -4747,7 +4739,6 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
4747 q); 4739 q);
4748 } 4740 }
4749 4741
4750#ifdef BCM_CNIC
4751 if (!NO_FCOE(bp)) { 4742 if (!NO_FCOE(bp)) {
4752 fp = &bp->fp[FCOE_IDX(bp)]; 4743 fp = &bp->fp[FCOE_IDX(bp)];
4753 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 4744 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
@@ -4770,22 +4761,16 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
4770 bnx2x_link_report(bp); 4761 bnx2x_link_report(bp);
4771 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 4762 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4772 } 4763 }
4773#else
4774 /* If no FCoE ring - ACK MCP now */
4775 bnx2x_link_report(bp);
4776 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4777#endif /* BCM_CNIC */
4778} 4764}
4779 4765
4780static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( 4766static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4781 struct bnx2x *bp, u32 cid) 4767 struct bnx2x *bp, u32 cid)
4782{ 4768{
4783 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); 4769 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
4784#ifdef BCM_CNIC 4770
4785 if (cid == BNX2X_FCOE_ETH_CID(bp)) 4771 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
4786 return &bnx2x_fcoe_sp_obj(bp, q_obj); 4772 return &bnx2x_fcoe_sp_obj(bp, q_obj);
4787 else 4773 else
4788#endif
4789 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; 4774 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
4790} 4775}
4791 4776
@@ -4793,6 +4778,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
4793{ 4778{
4794 u16 hw_cons, sw_cons, sw_prod; 4779 u16 hw_cons, sw_cons, sw_prod;
4795 union event_ring_elem *elem; 4780 union event_ring_elem *elem;
4781 u8 echo;
4796 u32 cid; 4782 u32 cid;
4797 u8 opcode; 4783 u8 opcode;
4798 int spqe_cnt = 0; 4784 int spqe_cnt = 0;
@@ -4847,10 +4833,11 @@ static void bnx2x_eq_int(struct bnx2x *bp)
4847 */ 4833 */
4848 DP(BNX2X_MSG_SP, 4834 DP(BNX2X_MSG_SP,
4849 "got delete ramrod for MULTI[%d]\n", cid); 4835 "got delete ramrod for MULTI[%d]\n", cid);
4850#ifdef BCM_CNIC 4836
4851 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) 4837 if (CNIC_LOADED(bp) &&
4838 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
4852 goto next_spqe; 4839 goto next_spqe;
4853#endif 4840
4854 q_obj = bnx2x_cid_to_q_obj(bp, cid); 4841 q_obj = bnx2x_cid_to_q_obj(bp, cid);
4855 4842
4856 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) 4843 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
@@ -4875,21 +4862,34 @@ static void bnx2x_eq_int(struct bnx2x *bp)
4875 break; 4862 break;
4876 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 4863 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
4877 goto next_spqe; 4864 goto next_spqe;
4865
4878 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 4866 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
4879 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, 4867 echo = elem->message.data.function_update_event.echo;
4880 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 4868 if (echo == SWITCH_UPDATE) {
4881 f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE); 4869 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4870 "got FUNC_SWITCH_UPDATE ramrod\n");
4871 if (f_obj->complete_cmd(
4872 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
4873 break;
4882 4874
4883 /* We will perform the Queues update from sp_rtnl task 4875 } else {
4884 * as all Queue SP operations should run under 4876 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
4885 * rtnl_lock. 4877 "AFEX: ramrod completed FUNCTION_UPDATE\n");
4886 */ 4878 f_obj->complete_cmd(bp, f_obj,
4887 smp_mb__before_clear_bit(); 4879 BNX2X_F_CMD_AFEX_UPDATE);
4888 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, 4880
4889 &bp->sp_rtnl_state); 4881 /* We will perform the Queues update from
4890 smp_mb__after_clear_bit(); 4882 * sp_rtnl task as all Queue SP operations
4883 * should run under rtnl_lock.
4884 */
4885 smp_mb__before_clear_bit();
4886 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
4887 &bp->sp_rtnl_state);
4888 smp_mb__after_clear_bit();
4889
4890 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4891 }
4891 4892
4892 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4893 goto next_spqe; 4893 goto next_spqe;
4894 4894
4895 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 4895 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
@@ -4999,11 +4999,10 @@ static void bnx2x_sp_task(struct work_struct *work)
4999 4999
5000 /* SP events: STAT_QUERY and others */ 5000 /* SP events: STAT_QUERY and others */
5001 if (status & BNX2X_DEF_SB_IDX) { 5001 if (status & BNX2X_DEF_SB_IDX) {
5002#ifdef BCM_CNIC
5003 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 5002 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5004 5003
5005 if ((!NO_FCOE(bp)) && 5004 if (FCOE_INIT(bp) &&
5006 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 5005 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5007 /* 5006 /*
5008 * Prevent local bottom-halves from running as 5007 * Prevent local bottom-halves from running as
5009 * we are going to change the local NAPI list. 5008 * we are going to change the local NAPI list.
@@ -5012,7 +5011,7 @@ static void bnx2x_sp_task(struct work_struct *work)
5012 napi_schedule(&bnx2x_fcoe(bp, napi)); 5011 napi_schedule(&bnx2x_fcoe(bp, napi));
5013 local_bh_enable(); 5012 local_bh_enable();
5014 } 5013 }
5015#endif 5014
5016 /* Handle EQ completions */ 5015 /* Handle EQ completions */
5017 bnx2x_eq_int(bp); 5016 bnx2x_eq_int(bp);
5018 5017
@@ -5050,8 +5049,7 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5050 return IRQ_HANDLED; 5049 return IRQ_HANDLED;
5051#endif 5050#endif
5052 5051
5053#ifdef BCM_CNIC 5052 if (CNIC_LOADED(bp)) {
5054 {
5055 struct cnic_ops *c_ops; 5053 struct cnic_ops *c_ops;
5056 5054
5057 rcu_read_lock(); 5055 rcu_read_lock();
@@ -5060,7 +5058,7 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5060 c_ops->cnic_handler(bp->cnic_data, NULL); 5058 c_ops->cnic_handler(bp->cnic_data, NULL);
5061 rcu_read_unlock(); 5059 rcu_read_unlock();
5062 } 5060 }
5063#endif 5061
5064 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 5062 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
5065 5063
5066 return IRQ_HANDLED; 5064 return IRQ_HANDLED;
@@ -5498,12 +5496,10 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5498 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 5496 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5499 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 5497 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5500 5498
5501#ifdef BCM_CNIC
5502 if (!NO_FCOE(bp)) 5499 if (!NO_FCOE(bp))
5503 5500
5504 /* Configure rx_mode of FCoE Queue */ 5501 /* Configure rx_mode of FCoE Queue */
5505 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); 5502 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5506#endif
5507 5503
5508 switch (bp->rx_mode) { 5504 switch (bp->rx_mode) {
5509 case BNX2X_RX_MODE_NONE: 5505 case BNX2X_RX_MODE_NONE:
@@ -5624,12 +5620,12 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5624 5620
5625static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) 5621static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
5626{ 5622{
5627 return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT; 5623 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
5628} 5624}
5629 5625
5630static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) 5626static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
5631{ 5627{
5632 return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT; 5628 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
5633} 5629}
5634 5630
5635static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) 5631static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
@@ -5720,23 +5716,25 @@ static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
5720 txdata->tx_pkt = 0; 5716 txdata->tx_pkt = 0;
5721} 5717}
5722 5718
5719static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
5720{
5721 int i;
5722
5723 for_each_tx_queue_cnic(bp, i)
5724 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
5725}
5723static void bnx2x_init_tx_rings(struct bnx2x *bp) 5726static void bnx2x_init_tx_rings(struct bnx2x *bp)
5724{ 5727{
5725 int i; 5728 int i;
5726 u8 cos; 5729 u8 cos;
5727 5730
5728 for_each_tx_queue(bp, i) 5731 for_each_eth_queue(bp, i)
5729 for_each_cos_in_tx_queue(&bp->fp[i], cos) 5732 for_each_cos_in_tx_queue(&bp->fp[i], cos)
5730 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); 5733 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
5731} 5734}
5732 5735
5733void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 5736void bnx2x_nic_init_cnic(struct bnx2x *bp)
5734{ 5737{
5735 int i;
5736
5737 for_each_eth_queue(bp, i)
5738 bnx2x_init_eth_fp(bp, i);
5739#ifdef BCM_CNIC
5740 if (!NO_FCOE(bp)) 5738 if (!NO_FCOE(bp))
5741 bnx2x_init_fcoe_fp(bp); 5739 bnx2x_init_fcoe_fp(bp);
5742 5740
@@ -5744,8 +5742,22 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5744 BNX2X_VF_ID_INVALID, false, 5742 BNX2X_VF_ID_INVALID, false,
5745 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); 5743 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
5746 5744
5747#endif 5745 /* ensure status block indices were read */
5746 rmb();
5747 bnx2x_init_rx_rings_cnic(bp);
5748 bnx2x_init_tx_rings_cnic(bp);
5749
5750 /* flush all */
5751 mb();
5752 mmiowb();
5753}
5748 5754
5755void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5756{
5757 int i;
5758
5759 for_each_eth_queue(bp, i)
5760 bnx2x_init_eth_fp(bp, i);
5749 /* Initialize MOD_ABS interrupts */ 5761 /* Initialize MOD_ABS interrupts */
5750 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, 5762 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
5751 bp->common.shmem_base, bp->common.shmem2_base, 5763 bp->common.shmem_base, bp->common.shmem2_base,
@@ -6031,10 +6043,9 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
6031 msleep(50); 6043 msleep(50);
6032 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6044 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6033 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6045 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6034#ifndef BCM_CNIC 6046 if (!CNIC_SUPPORT(bp))
6035 /* set NIC mode */ 6047 /* set NIC mode */
6036 REG_WR(bp, PRS_REG_NIC_MODE, 1); 6048 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6037#endif
6038 6049
6039 /* Enable inputs of parser neighbor blocks */ 6050 /* Enable inputs of parser neighbor blocks */
6040 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 6051 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
@@ -6522,9 +6533,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6522 REG_WR(bp, QM_REG_SOFT_RESET, 1); 6533 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6523 REG_WR(bp, QM_REG_SOFT_RESET, 0); 6534 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6524 6535
6525#ifdef BCM_CNIC 6536 if (CNIC_SUPPORT(bp))
6526 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); 6537 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
6527#endif
6528 6538
6529 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); 6539 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
6530 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); 6540 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
@@ -6611,18 +6621,18 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6611 6621
6612 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); 6622 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
6613 6623
6614#ifdef BCM_CNIC 6624 if (CNIC_SUPPORT(bp)) {
6615 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 6625 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6616 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 6626 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6617 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); 6627 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6618 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); 6628 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6619 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); 6629 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6620 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 6630 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6621 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); 6631 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6622 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 6632 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6623 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); 6633 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6624 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); 6634 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6625#endif 6635 }
6626 REG_WR(bp, SRC_REG_SOFT_RST, 0); 6636 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6627 6637
6628 if (sizeof(union cdu_context) != 1024) 6638 if (sizeof(union cdu_context) != 1024)
@@ -6786,11 +6796,11 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6786 /* QM cid (connection) count */ 6796 /* QM cid (connection) count */
6787 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); 6797 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
6788 6798
6789#ifdef BCM_CNIC 6799 if (CNIC_SUPPORT(bp)) {
6790 bnx2x_init_block(bp, BLOCK_TM, init_phase); 6800 bnx2x_init_block(bp, BLOCK_TM, init_phase);
6791 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 6801 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6792 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 6802 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6793#endif 6803 }
6794 6804
6795 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 6805 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
6796 6806
@@ -6876,9 +6886,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6876 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 6886 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6877 } 6887 }
6878 6888
6879#ifdef BCM_CNIC 6889 if (CNIC_SUPPORT(bp))
6880 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 6890 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
6881#endif 6891
6882 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 6892 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
6883 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 6893 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
6884 6894
@@ -7039,6 +7049,130 @@ static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7039 bnx2x_ilt_wr(bp, i, 0); 7049 bnx2x_ilt_wr(bp, i, 0);
7040} 7050}
7041 7051
7052
/* Program the SRC block (searcher) for CNIC offload on this port:
 * initialize the T2 hash table from the pre-allocated bp->t2 buffer and
 * set the number of T1 hash bits, which fixes the T1 entry count.
 */
void bnx2x_init_searcher(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
}
7060
/* Issue a SWITCH_UPDATE function-state ramrod that suspends
 * (@suspend != 0) or resumes (@suspend == 0) Tx switching for this PF.
 * RAMROD_COMP_WAIT makes the call block until the FW completion arrives;
 * RAMROD_RETRY is also set for the state machine.
 *
 * Returns the bnx2x_func_state_change() result: 0 on success, negative
 * error code otherwise.
 */
static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
{
	int rc;
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_switch_update_params *switch_update_params =
		&func_params.params.switch_update;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;

	/* Function parameters */
	switch_update_params->suspend = suspend;

	rc = bnx2x_func_state_change(bp, &func_params);

	return rc;
}
7082
/* Clear the PRS NIC_MODE register (i.e. leave "NIC mode", enabling
 * searcher-based L5 offload) while the function is live.  The sequence is:
 *   1. close all network input (rx filter in SF mode; NIG per-function
 *      VLAN/MAC enables in MF mode, saving their values for restore),
 *   2. close the BMC-to-host path,
 *   3. suspend Tx switching via a SWITCH_UPDATE ramrod — its completion
 *      guarantees the Parser has drained all of this PF's packets in BRB,
 *      so it is then safe to flip NIC_MODE,
 *   4. write PRS_REG_NIC_MODE = 0,
 *   5. reopen everything in reverse order and resume Tx switching.
 *
 * Returns 0 on success, or the error from bnx2x_func_switch_update().
 */
int bnx2x_reset_nic_mode(struct bnx2x *bp)
{
	int rc, i, port = BP_PORT(bp);
	int vlan_en = 0, mac_en[NUM_MACS];	/* saved NIG enables (MF mode only) */


	/* Close input from network */
	if (bp->mf_mode == SINGLE_FUNCTION) {
		bnx2x_set_rx_filter(&bp->link_params, 0);
	} else {
		/* Save and clear the per-function VLAN and MAC enables;
		 * LLH1 registers belong to port 1, LLH0 to port 0.
		 */
		vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
				   NIG_REG_LLH0_FUNC_EN);
		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
			   NIG_REG_LLH0_FUNC_EN, 0);
		for (i = 0; i < NUM_MACS; i++) {
			mac_en[i] = REG_RD(bp, port ?
					     (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					      4 * i) :
					     (NIG_REG_LLH0_FUNC_MEM_ENABLE +
					      4 * i));
			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					   4 * i) :
				   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
		}
	}

	/* Close BMC to host */
	/* NOTE(review): P0 is selected for port 1 and P1 for port 0 here,
	 * the opposite of the LLH1-for-port-1 convention used above —
	 * confirm against the register spec whether this is intentional.
	 */
	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);

	/* Suspend Tx switching to the PF. Completion of this ramrod
	 * further guarantees that all the packets of that PF / child
	 * VFs in BRB were processed by the Parser, so it is safe to
	 * change the NIC_MODE register.
	 */
	rc = bnx2x_func_switch_update(bp, 1);
	if (rc) {
		BNX2X_ERR("Can't suspend tx-switching!\n");
		return rc;
	}

	/* Change NIC_MODE register */
	REG_WR(bp, PRS_REG_NIC_MODE, 0);

	/* Open input from network */
	if (bp->mf_mode == SINGLE_FUNCTION) {
		bnx2x_set_rx_filter(&bp->link_params, 1);
	} else {
		/* Restore the previously saved VLAN and MAC enables */
		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
			   NIG_REG_LLH0_FUNC_EN, vlan_en);
		for (i = 0; i < NUM_MACS; i++) {
			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					   4 * i) :
				   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
				   mac_en[i]);
		}
	}

	/* Enable BMC to host */
	/* NOTE(review): same inverted P0/P1-vs-port selection as the
	 * close above — confirm.
	 */
	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);

	/* Resume Tx switching to the PF */
	rc = bnx2x_func_switch_update(bp, 0);
	if (rc) {
		BNX2X_ERR("Can't resume tx-switching!\n");
		return rc;
	}

	DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
	return 0;
}
7155
/* Per-function HW init performed when cnic registers at run-time:
 * program the CNIC ILT lines, and — if the device was brought up in
 * NIC mode (CONFIGURE_NIC_MODE) — initialize the searcher and switch
 * the Parser out of NIC mode so L5 offload can work.
 *
 * Returns 0 on success, or the bnx2x_reset_nic_mode() error code.
 */
int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
{
	int rc;

	bnx2x_ilt_init_op_cnic(bp, INITOP_SET);

	if (CONFIGURE_NIC_MODE(bp)) {
		/* Configure searcher as part of function hw init */
		bnx2x_init_searcher(bp);

		/* Reset NIC mode */
		rc = bnx2x_reset_nic_mode(bp);
		if (rc)
			BNX2X_ERR("Can't change NIC mode!\n");
		return rc;
	}

	return 0;
}
7175
7042static int bnx2x_init_hw_func(struct bnx2x *bp) 7176static int bnx2x_init_hw_func(struct bnx2x *bp)
7043{ 7177{
7044 int port = BP_PORT(bp); 7178 int port = BP_PORT(bp);
@@ -7081,17 +7215,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7081 } 7215 }
7082 bnx2x_ilt_init_op(bp, INITOP_SET); 7216 bnx2x_ilt_init_op(bp, INITOP_SET);
7083 7217
7084#ifdef BCM_CNIC 7218 if (!CONFIGURE_NIC_MODE(bp)) {
7085 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); 7219 bnx2x_init_searcher(bp);
7086 7220 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7087 /* T1 hash bits value determines the T1 number of entries */ 7221 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7088 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 7222 } else {
7089#endif 7223 /* Set NIC mode */
7224 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7225 DP(NETIF_MSG_IFUP, "NIC MODE configrued\n");
7090 7226
7091#ifndef BCM_CNIC 7227 }
7092 /* set NIC mode */
7093 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7094#endif /* BCM_CNIC */
7095 7228
7096 if (!CHIP_IS_E1x(bp)) { 7229 if (!CHIP_IS_E1x(bp)) {
7097 u32 pf_conf = IGU_PF_CONF_FUNC_EN; 7230 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
@@ -7342,6 +7475,20 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7342} 7475}
7343 7476
7344 7477
/* Release all cnic-only resources allocated by bnx2x_alloc_mem_cnic():
 * the CNIC ILT memory, the cnic status block (E2 or E1x layout depending
 * on chip), and the searcher T2 table.
 */
void bnx2x_free_mem_cnic(struct bnx2x *bp)
{
	bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);

	/* Status block layout differs between E1x and E2/E3 chips */
	if (!CHIP_IS_E1x(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
}
7491
7345void bnx2x_free_mem(struct bnx2x *bp) 7492void bnx2x_free_mem(struct bnx2x *bp)
7346{ 7493{
7347 int i; 7494 int i;
@@ -7366,17 +7513,6 @@ void bnx2x_free_mem(struct bnx2x *bp)
7366 7513
7367 BNX2X_FREE(bp->ilt->lines); 7514 BNX2X_FREE(bp->ilt->lines);
7368 7515
7369#ifdef BCM_CNIC
7370 if (!CHIP_IS_E1x(bp))
7371 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
7372 sizeof(struct host_hc_status_block_e2));
7373 else
7374 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
7375 sizeof(struct host_hc_status_block_e1x));
7376
7377 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7378#endif
7379
7380 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 7516 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7381 7517
7382 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, 7518 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
@@ -7444,24 +7580,44 @@ alloc_mem_err:
7444 return -ENOMEM; 7580 return -ENOMEM;
7445} 7581}
7446 7582
7447 7583int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
7448int bnx2x_alloc_mem(struct bnx2x *bp)
7449{ 7584{
7450 int i, allocated, context_size;
7451
7452#ifdef BCM_CNIC
7453 if (!CHIP_IS_E1x(bp)) 7585 if (!CHIP_IS_E1x(bp))
7454 /* size = the status block + ramrod buffers */ 7586 /* size = the status block + ramrod buffers */
7455 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, 7587 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
7456 sizeof(struct host_hc_status_block_e2)); 7588 sizeof(struct host_hc_status_block_e2));
7457 else 7589 else
7458 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping, 7590 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
7459 sizeof(struct host_hc_status_block_e1x)); 7591 &bp->cnic_sb_mapping,
7592 sizeof(struct
7593 host_hc_status_block_e1x));
7460 7594
7461 /* allocate searcher T2 table */ 7595 if (CONFIGURE_NIC_MODE(bp))
7462 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 7596 /* allocate searcher T2 table, as it wan't allocated before */
7463#endif 7597 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7598
7599 /* write address to which L5 should insert its values */
7600 bp->cnic_eth_dev.addr_drv_info_to_mcp =
7601 &bp->slowpath->drv_info_to_mcp;
7602
7603 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
7604 goto alloc_mem_err;
7605
7606 return 0;
7607
7608alloc_mem_err:
7609 bnx2x_free_mem_cnic(bp);
7610 BNX2X_ERR("Can't allocate memory\n");
7611 return -ENOMEM;
7612}
7613
7614int bnx2x_alloc_mem(struct bnx2x *bp)
7615{
7616 int i, allocated, context_size;
7464 7617
7618 if (!CONFIGURE_NIC_MODE(bp))
7619 /* allocate searcher T2 table */
7620 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7465 7621
7466 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, 7622 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7467 sizeof(struct host_sp_status_block)); 7623 sizeof(struct host_sp_status_block));
@@ -7469,11 +7625,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
7469 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, 7625 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7470 sizeof(struct bnx2x_slowpath)); 7626 sizeof(struct bnx2x_slowpath));
7471 7627
7472#ifdef BCM_CNIC
7473 /* write address to which L5 should insert its values */
7474 bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp;
7475#endif
7476
7477 /* Allocated memory for FW statistics */ 7628 /* Allocated memory for FW statistics */
7478 if (bnx2x_alloc_fw_stats_mem(bp)) 7629 if (bnx2x_alloc_fw_stats_mem(bp))
7479 goto alloc_mem_err; 7630 goto alloc_mem_err;
@@ -7595,14 +7746,12 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
7595{ 7746{
7596 unsigned long ramrod_flags = 0; 7747 unsigned long ramrod_flags = 0;
7597 7748
7598#ifdef BCM_CNIC
7599 if (is_zero_ether_addr(bp->dev->dev_addr) && 7749 if (is_zero_ether_addr(bp->dev->dev_addr) &&
7600 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { 7750 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
7601 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, 7751 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
7602 "Ignoring Zero MAC for STORAGE SD mode\n"); 7752 "Ignoring Zero MAC for STORAGE SD mode\n");
7603 return 0; 7753 return 0;
7604 } 7754 }
7605#endif
7606 7755
7607 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); 7756 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
7608 7757
@@ -7631,7 +7780,8 @@ void bnx2x_set_int_mode(struct bnx2x *bp)
7631 bnx2x_enable_msi(bp); 7780 bnx2x_enable_msi(bp);
7632 /* falling through... */ 7781 /* falling through... */
7633 case INT_MODE_INTx: 7782 case INT_MODE_INTx:
7634 bp->num_queues = 1 + NON_ETH_CONTEXT_USE; 7783 bp->num_ethernet_queues = 1;
7784 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
7635 BNX2X_DEV_INFO("set number of queues to 1\n"); 7785 BNX2X_DEV_INFO("set number of queues to 1\n");
7636 break; 7786 break;
7637 default: 7787 default:
@@ -7643,9 +7793,10 @@ void bnx2x_set_int_mode(struct bnx2x *bp)
7643 bp->flags & USING_SINGLE_MSIX_FLAG) { 7793 bp->flags & USING_SINGLE_MSIX_FLAG) {
7644 /* failed to enable multiple MSI-X */ 7794 /* failed to enable multiple MSI-X */
7645 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", 7795 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
7646 bp->num_queues, 1 + NON_ETH_CONTEXT_USE); 7796 bp->num_queues,
7797 1 + bp->num_cnic_queues);
7647 7798
7648 bp->num_queues = 1 + NON_ETH_CONTEXT_USE; 7799 bp->num_queues = 1 + bp->num_cnic_queues;
7649 7800
7650 /* Try to enable MSI */ 7801 /* Try to enable MSI */
7651 if (!(bp->flags & USING_SINGLE_MSIX_FLAG) && 7802 if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
@@ -7678,9 +7829,9 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
7678 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 7829 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
7679 ilt_client->start = line; 7830 ilt_client->start = line;
7680 line += bnx2x_cid_ilt_lines(bp); 7831 line += bnx2x_cid_ilt_lines(bp);
7681#ifdef BCM_CNIC 7832
7682 line += CNIC_ILT_LINES; 7833 if (CNIC_SUPPORT(bp))
7683#endif 7834 line += CNIC_ILT_LINES;
7684 ilt_client->end = line - 1; 7835 ilt_client->end = line - 1;
7685 7836
7686 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 7837 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
@@ -7713,49 +7864,43 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
7713 ilog2(ilt_client->page_size >> 12)); 7864 ilog2(ilt_client->page_size >> 12));
7714 7865
7715 } 7866 }
7716 /* SRC */
7717 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
7718#ifdef BCM_CNIC
7719 ilt_client->client_num = ILT_CLIENT_SRC;
7720 ilt_client->page_size = SRC_ILT_PAGE_SZ;
7721 ilt_client->flags = 0;
7722 ilt_client->start = line;
7723 line += SRC_ILT_LINES;
7724 ilt_client->end = line - 1;
7725 7867
7726 DP(NETIF_MSG_IFUP, 7868 if (CNIC_SUPPORT(bp)) {
7727 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 7869 /* SRC */
7728 ilt_client->start, 7870 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
7729 ilt_client->end, 7871 ilt_client->client_num = ILT_CLIENT_SRC;
7730 ilt_client->page_size, 7872 ilt_client->page_size = SRC_ILT_PAGE_SZ;
7731 ilt_client->flags, 7873 ilt_client->flags = 0;
7732 ilog2(ilt_client->page_size >> 12)); 7874 ilt_client->start = line;
7875 line += SRC_ILT_LINES;
7876 ilt_client->end = line - 1;
7733 7877
7734#else 7878 DP(NETIF_MSG_IFUP,
7735 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); 7879 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7736#endif 7880 ilt_client->start,
7881 ilt_client->end,
7882 ilt_client->page_size,
7883 ilt_client->flags,
7884 ilog2(ilt_client->page_size >> 12));
7737 7885
7738 /* TM */ 7886 /* TM */
7739 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 7887 ilt_client = &ilt->clients[ILT_CLIENT_TM];
7740#ifdef BCM_CNIC 7888 ilt_client->client_num = ILT_CLIENT_TM;
7741 ilt_client->client_num = ILT_CLIENT_TM; 7889 ilt_client->page_size = TM_ILT_PAGE_SZ;
7742 ilt_client->page_size = TM_ILT_PAGE_SZ; 7890 ilt_client->flags = 0;
7743 ilt_client->flags = 0; 7891 ilt_client->start = line;
7744 ilt_client->start = line; 7892 line += TM_ILT_LINES;
7745 line += TM_ILT_LINES; 7893 ilt_client->end = line - 1;
7746 ilt_client->end = line - 1;
7747 7894
7748 DP(NETIF_MSG_IFUP, 7895 DP(NETIF_MSG_IFUP,
7749 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 7896 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7750 ilt_client->start, 7897 ilt_client->start,
7751 ilt_client->end, 7898 ilt_client->end,
7752 ilt_client->page_size, 7899 ilt_client->page_size,
7753 ilt_client->flags, 7900 ilt_client->flags,
7754 ilog2(ilt_client->page_size >> 12)); 7901 ilog2(ilt_client->page_size >> 12));
7902 }
7755 7903
7756#else
7757 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
7758#endif
7759 BUG_ON(line > ILT_MAX_LINES); 7904 BUG_ON(line > ILT_MAX_LINES);
7760} 7905}
7761 7906
@@ -7923,6 +8068,9 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
7923 /* Set the command */ 8068 /* Set the command */
7924 q_params.cmd = BNX2X_Q_CMD_SETUP; 8069 q_params.cmd = BNX2X_Q_CMD_SETUP;
7925 8070
8071 if (IS_FCOE_FP(fp))
8072 bp->fcoe_init = true;
8073
7926 /* Change the state to SETUP */ 8074 /* Change the state to SETUP */
7927 rc = bnx2x_queue_state_change(bp, &q_params); 8075 rc = bnx2x_queue_state_change(bp, &q_params);
7928 if (rc) { 8076 if (rc) {
@@ -8036,12 +8184,12 @@ static void bnx2x_reset_func(struct bnx2x *bp)
8036 SB_DISABLED); 8184 SB_DISABLED);
8037 } 8185 }
8038 8186
8039#ifdef BCM_CNIC 8187 if (CNIC_LOADED(bp))
8040 /* CNIC SB */ 8188 /* CNIC SB */
8041 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8189 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8042 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)), 8190 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8043 SB_DISABLED); 8191 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8044#endif 8192
8045 /* SP SB */ 8193 /* SP SB */
8046 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8194 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8047 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 8195 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
@@ -8060,19 +8208,19 @@ static void bnx2x_reset_func(struct bnx2x *bp)
8060 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 8208 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8061 } 8209 }
8062 8210
8063#ifdef BCM_CNIC 8211 if (CNIC_LOADED(bp)) {
8064 /* Disable Timer scan */ 8212 /* Disable Timer scan */
8065 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 8213 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8066 /* 8214 /*
8067 * Wait for at least 10ms and up to 2 second for the timers scan to 8215 * Wait for at least 10ms and up to 2 second for the timers
8068 * complete 8216 * scan to complete
8069 */ 8217 */
8070 for (i = 0; i < 200; i++) { 8218 for (i = 0; i < 200; i++) {
8071 msleep(10); 8219 msleep(10);
8072 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) 8220 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8073 break; 8221 break;
8222 }
8074 } 8223 }
8075#endif
8076 /* Clear ILT */ 8224 /* Clear ILT */
8077 bnx2x_clear_func_ilt(bp, func); 8225 bnx2x_clear_func_ilt(bp, func);
8078 8226
@@ -8408,13 +8556,24 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
8408 /* Close multi and leading connections 8556 /* Close multi and leading connections
8409 * Completions for ramrods are collected in a synchronous way 8557 * Completions for ramrods are collected in a synchronous way
8410 */ 8558 */
8411 for_each_queue(bp, i) 8559 for_each_eth_queue(bp, i)
8412 if (bnx2x_stop_queue(bp, i)) 8560 if (bnx2x_stop_queue(bp, i))
8413#ifdef BNX2X_STOP_ON_ERROR 8561#ifdef BNX2X_STOP_ON_ERROR
8414 return; 8562 return;
8415#else 8563#else
8416 goto unload_error; 8564 goto unload_error;
8417#endif 8565#endif
8566
8567 if (CNIC_LOADED(bp)) {
8568 for_each_cnic_queue(bp, i)
8569 if (bnx2x_stop_queue(bp, i))
8570#ifdef BNX2X_STOP_ON_ERROR
8571 return;
8572#else
8573 goto unload_error;
8574#endif
8575 }
8576
8418 /* If SP settings didn't get completed so far - something 8577 /* If SP settings didn't get completed so far - something
8419 * very wrong has happen. 8578 * very wrong has happen.
8420 */ 8579 */
@@ -8436,6 +8595,8 @@ unload_error:
8436 bnx2x_netif_stop(bp, 1); 8595 bnx2x_netif_stop(bp, 1);
8437 /* Delete all NAPI objects */ 8596 /* Delete all NAPI objects */
8438 bnx2x_del_all_napi(bp); 8597 bnx2x_del_all_napi(bp);
8598 if (CNIC_LOADED(bp))
8599 bnx2x_del_all_napi_cnic(bp);
8439 8600
8440 /* Release IRQs */ 8601 /* Release IRQs */
8441 bnx2x_free_irq(bp); 8602 bnx2x_free_irq(bp);
@@ -10223,12 +10384,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
10223void bnx2x_get_iscsi_info(struct bnx2x *bp) 10384void bnx2x_get_iscsi_info(struct bnx2x *bp)
10224{ 10385{
10225 u32 no_flags = NO_ISCSI_FLAG; 10386 u32 no_flags = NO_ISCSI_FLAG;
10226#ifdef BCM_CNIC
10227 int port = BP_PORT(bp); 10387 int port = BP_PORT(bp);
10228
10229 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 10388 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10230 drv_lic_key[port].max_iscsi_conn); 10389 drv_lic_key[port].max_iscsi_conn);
10231 10390
10391 if (!CNIC_SUPPORT(bp)) {
10392 bp->flags |= no_flags;
10393 return;
10394 }
10395
10232 /* Get the number of maximum allowed iSCSI connections */ 10396 /* Get the number of maximum allowed iSCSI connections */
10233 bp->cnic_eth_dev.max_iscsi_conn = 10397 bp->cnic_eth_dev.max_iscsi_conn =
10234 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> 10398 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
@@ -10243,12 +10407,9 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp)
10243 */ 10407 */
10244 if (!bp->cnic_eth_dev.max_iscsi_conn) 10408 if (!bp->cnic_eth_dev.max_iscsi_conn)
10245 bp->flags |= no_flags; 10409 bp->flags |= no_flags;
10246#else 10410
10247 bp->flags |= no_flags;
10248#endif
10249} 10411}
10250 10412
10251#ifdef BCM_CNIC
10252static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) 10413static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
10253{ 10414{
10254 /* Port info */ 10415 /* Port info */
@@ -10263,16 +10424,18 @@ static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
10263 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 10424 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10264 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); 10425 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
10265} 10426}
10266#endif
10267static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) 10427static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
10268{ 10428{
10269#ifdef BCM_CNIC
10270 int port = BP_PORT(bp); 10429 int port = BP_PORT(bp);
10271 int func = BP_ABS_FUNC(bp); 10430 int func = BP_ABS_FUNC(bp);
10272
10273 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 10431 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10274 drv_lic_key[port].max_fcoe_conn); 10432 drv_lic_key[port].max_fcoe_conn);
10275 10433
10434 if (!CNIC_SUPPORT(bp)) {
10435 bp->flags |= NO_FCOE_FLAG;
10436 return;
10437 }
10438
10276 /* Get the number of maximum allowed FCoE connections */ 10439 /* Get the number of maximum allowed FCoE connections */
10277 bp->cnic_eth_dev.max_fcoe_conn = 10440 bp->cnic_eth_dev.max_fcoe_conn =
10278 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> 10441 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
@@ -10318,9 +10481,6 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
10318 */ 10481 */
10319 if (!bp->cnic_eth_dev.max_fcoe_conn) 10482 if (!bp->cnic_eth_dev.max_fcoe_conn)
10320 bp->flags |= NO_FCOE_FLAG; 10483 bp->flags |= NO_FCOE_FLAG;
10321#else
10322 bp->flags |= NO_FCOE_FLAG;
10323#endif
10324} 10484}
10325 10485
10326static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) 10486static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -10334,132 +10494,133 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
10334 bnx2x_get_fcoe_info(bp); 10494 bnx2x_get_fcoe_info(bp);
10335} 10495}
10336 10496
10337static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) 10497static void __devinit bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
10338{ 10498{
10339 u32 val, val2; 10499 u32 val, val2;
10340 int func = BP_ABS_FUNC(bp); 10500 int func = BP_ABS_FUNC(bp);
10341 int port = BP_PORT(bp); 10501 int port = BP_PORT(bp);
10342#ifdef BCM_CNIC
10343 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; 10502 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
10344 u8 *fip_mac = bp->fip_mac; 10503 u8 *fip_mac = bp->fip_mac;
10345#endif
10346 10504
10347 /* Zero primary MAC configuration */ 10505 if (IS_MF(bp)) {
10348 memset(bp->dev->dev_addr, 0, ETH_ALEN); 10506 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
10349
10350 if (BP_NOMCP(bp)) {
10351 BNX2X_ERROR("warning: random MAC workaround active\n");
10352 eth_hw_addr_random(bp->dev);
10353 } else if (IS_MF(bp)) {
10354 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
10355 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
10356 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
10357 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
10358 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
10359
10360#ifdef BCM_CNIC
10361 /*
10362 * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
10363 * FCoE MAC then the appropriate feature should be disabled. 10507 * FCoE MAC then the appropriate feature should be disabled.
10364 * 10508 * In non SD mode features configuration comes from struct
10365 * In non SD mode features configuration comes from 10509 * func_ext_config.
10366 * struct func_ext_config.
10367 */ 10510 */
10368 if (!IS_MF_SD(bp)) { 10511 if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
10369 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); 10512 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
10370 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 10513 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
10371 val2 = MF_CFG_RD(bp, func_ext_config[func]. 10514 val2 = MF_CFG_RD(bp, func_ext_config[func].
10372 iscsi_mac_addr_upper); 10515 iscsi_mac_addr_upper);
10373 val = MF_CFG_RD(bp, func_ext_config[func]. 10516 val = MF_CFG_RD(bp, func_ext_config[func].
10374 iscsi_mac_addr_lower); 10517 iscsi_mac_addr_lower);
10375 bnx2x_set_mac_buf(iscsi_mac, val, val2); 10518 bnx2x_set_mac_buf(iscsi_mac, val, val2);
10376 BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", 10519 BNX2X_DEV_INFO
10377 iscsi_mac); 10520 ("Read iSCSI MAC: %pM\n", iscsi_mac);
10378 } else 10521 } else {
10379 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; 10522 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
10523 }
10380 10524
10381 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 10525 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
10382 val2 = MF_CFG_RD(bp, func_ext_config[func]. 10526 val2 = MF_CFG_RD(bp, func_ext_config[func].
10383 fcoe_mac_addr_upper); 10527 fcoe_mac_addr_upper);
10384 val = MF_CFG_RD(bp, func_ext_config[func]. 10528 val = MF_CFG_RD(bp, func_ext_config[func].
10385 fcoe_mac_addr_lower); 10529 fcoe_mac_addr_lower);
10386 bnx2x_set_mac_buf(fip_mac, val, val2); 10530 bnx2x_set_mac_buf(fip_mac, val, val2);
10387 BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n", 10531 BNX2X_DEV_INFO
10388 fip_mac); 10532 ("Read FCoE L2 MAC: %pM\n", fip_mac);
10389 10533 } else {
10390 } else
10391 bp->flags |= NO_FCOE_FLAG; 10534 bp->flags |= NO_FCOE_FLAG;
10535 }
10392 10536
10393 bp->mf_ext_config = cfg; 10537 bp->mf_ext_config = cfg;
10394 10538
10395 } else { /* SD MODE */ 10539 } else { /* SD MODE */
10396 if (IS_MF_STORAGE_SD(bp)) { 10540 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
10397 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { 10541 /* use primary mac as iscsi mac */
10398 /* use primary mac as iscsi mac */ 10542 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
10399 memcpy(iscsi_mac, bp->dev->dev_addr, 10543
10400 ETH_ALEN); 10544 BNX2X_DEV_INFO("SD ISCSI MODE\n");
10401 10545 BNX2X_DEV_INFO
10402 BNX2X_DEV_INFO("SD ISCSI MODE\n"); 10546 ("Read iSCSI MAC: %pM\n", iscsi_mac);
10403 BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", 10547 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
10404 iscsi_mac); 10548 /* use primary mac as fip mac */
10405 } else { /* FCoE */ 10549 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
10406 memcpy(fip_mac, bp->dev->dev_addr, 10550 BNX2X_DEV_INFO("SD FCoE MODE\n");
10407 ETH_ALEN); 10551 BNX2X_DEV_INFO
10408 BNX2X_DEV_INFO("SD FCoE MODE\n"); 10552 ("Read FIP MAC: %pM\n", fip_mac);
10409 BNX2X_DEV_INFO("Read FIP MAC: %pM\n",
10410 fip_mac);
10411 }
10412 /* Zero primary MAC configuration */
10413 memset(bp->dev->dev_addr, 0, ETH_ALEN);
10414 } 10553 }
10415 } 10554 }
10416 10555
10556 if (IS_MF_STORAGE_SD(bp))
10557 /* Zero primary MAC configuration */
10558 memset(bp->dev->dev_addr, 0, ETH_ALEN);
10559
10417 if (IS_MF_FCOE_AFEX(bp)) 10560 if (IS_MF_FCOE_AFEX(bp))
10418 /* use FIP MAC as primary MAC */ 10561 /* use FIP MAC as primary MAC */
10419 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); 10562 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
10420 10563
10421#endif
10422 } else { 10564 } else {
10423 /* in SF read MACs from port configuration */
10424 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
10425 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
10426 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
10427
10428#ifdef BCM_CNIC
10429 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10565 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
10430 iscsi_mac_upper); 10566 iscsi_mac_upper);
10431 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10567 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
10432 iscsi_mac_lower); 10568 iscsi_mac_lower);
10433 bnx2x_set_mac_buf(iscsi_mac, val, val2); 10569 bnx2x_set_mac_buf(iscsi_mac, val, val2);
10434 10570
10435 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10571 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
10436 fcoe_fip_mac_upper); 10572 fcoe_fip_mac_upper);
10437 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10573 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
10438 fcoe_fip_mac_lower); 10574 fcoe_fip_mac_lower);
10439 bnx2x_set_mac_buf(fip_mac, val, val2); 10575 bnx2x_set_mac_buf(fip_mac, val, val2);
10440#endif
10441 } 10576 }
10442 10577
10443 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 10578 /* Disable iSCSI OOO if MAC configuration is invalid. */
10444 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
10445
10446#ifdef BCM_CNIC
10447 /* Disable iSCSI if MAC configuration is
10448 * invalid.
10449 */
10450 if (!is_valid_ether_addr(iscsi_mac)) { 10579 if (!is_valid_ether_addr(iscsi_mac)) {
10451 bp->flags |= NO_ISCSI_FLAG; 10580 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
10452 memset(iscsi_mac, 0, ETH_ALEN); 10581 memset(iscsi_mac, 0, ETH_ALEN);
10453 } 10582 }
10454 10583
10455 /* Disable FCoE if MAC configuration is 10584 /* Disable FCoE if MAC configuration is invalid. */
10456 * invalid.
10457 */
10458 if (!is_valid_ether_addr(fip_mac)) { 10585 if (!is_valid_ether_addr(fip_mac)) {
10459 bp->flags |= NO_FCOE_FLAG; 10586 bp->flags |= NO_FCOE_FLAG;
10460 memset(bp->fip_mac, 0, ETH_ALEN); 10587 memset(bp->fip_mac, 0, ETH_ALEN);
10461 } 10588 }
10462#endif 10589}
10590
10591static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
10592{
10593 u32 val, val2;
10594 int func = BP_ABS_FUNC(bp);
10595 int port = BP_PORT(bp);
10596
10597 /* Zero primary MAC configuration */
10598 memset(bp->dev->dev_addr, 0, ETH_ALEN);
10599
10600 if (BP_NOMCP(bp)) {
10601 BNX2X_ERROR("warning: random MAC workaround active\n");
10602 eth_hw_addr_random(bp->dev);
10603 } else if (IS_MF(bp)) {
10604 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
10605 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
10606 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
10607 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
10608 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
10609
10610 if (CNIC_SUPPORT(bp))
10611 bnx2x_get_cnic_mac_hwinfo(bp);
10612 } else {
10613 /* in SF read MACs from port configuration */
10614 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
10615 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
10616 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
10617
10618 if (CNIC_SUPPORT(bp))
10619 bnx2x_get_cnic_mac_hwinfo(bp);
10620 }
10621
10622 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
10623 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
10463 10624
10464 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) 10625 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
10465 dev_err(&bp->pdev->dev, 10626 dev_err(&bp->pdev->dev,
@@ -10836,9 +10997,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10836 mutex_init(&bp->port.phy_mutex); 10997 mutex_init(&bp->port.phy_mutex);
10837 mutex_init(&bp->fw_mb_mutex); 10998 mutex_init(&bp->fw_mb_mutex);
10838 spin_lock_init(&bp->stats_lock); 10999 spin_lock_init(&bp->stats_lock);
10839#ifdef BCM_CNIC 11000
10840 mutex_init(&bp->cnic_mutex);
10841#endif
10842 11001
10843 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 11002 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
10844 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 11003 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -10876,10 +11035,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10876 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); 11035 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
10877 11036
10878 bp->disable_tpa = disable_tpa; 11037 bp->disable_tpa = disable_tpa;
10879
10880#ifdef BCM_CNIC
10881 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); 11038 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
10882#endif
10883 11039
10884 /* Set TPA flags */ 11040 /* Set TPA flags */
10885 if (bp->disable_tpa) { 11041 if (bp->disable_tpa) {
@@ -10913,12 +11069,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10913 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); 11069 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
10914 bnx2x_dcbx_init_params(bp); 11070 bnx2x_dcbx_init_params(bp);
10915 11071
10916#ifdef BCM_CNIC
10917 if (CHIP_IS_E1x(bp)) 11072 if (CHIP_IS_E1x(bp))
10918 bp->cnic_base_cl_id = FP_SB_MAX_E1x; 11073 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
10919 else 11074 else
10920 bp->cnic_base_cl_id = FP_SB_MAX_E2; 11075 bp->cnic_base_cl_id = FP_SB_MAX_E2;
10921#endif
10922 11076
10923 /* multiple tx priority */ 11077 /* multiple tx priority */
10924 if (CHIP_IS_E1x(bp)) 11078 if (CHIP_IS_E1x(bp))
@@ -10928,6 +11082,16 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10928 if (CHIP_IS_E3B0(bp)) 11082 if (CHIP_IS_E3B0(bp))
10929 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; 11083 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
10930 11084
11085 /* We need at least one default status block for slow-path events,
11086 * second status block for the L2 queue, and a third status block for
11087 * CNIC if supproted.
11088 */
11089 if (CNIC_SUPPORT(bp))
11090 bp->min_msix_vec_cnt = 3;
11091 else
11092 bp->min_msix_vec_cnt = 2;
11093 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
11094
10931 return rc; 11095 return rc;
10932} 11096}
10933 11097
@@ -11164,11 +11328,9 @@ void bnx2x_set_rx_mode(struct net_device *dev)
11164 } 11328 }
11165 11329
11166 bp->rx_mode = rx_mode; 11330 bp->rx_mode = rx_mode;
11167#ifdef BCM_CNIC
11168 /* handle ISCSI SD mode */ 11331 /* handle ISCSI SD mode */
11169 if (IS_MF_ISCSI_SD(bp)) 11332 if (IS_MF_ISCSI_SD(bp))
11170 bp->rx_mode = BNX2X_RX_MODE_NONE; 11333 bp->rx_mode = BNX2X_RX_MODE_NONE;
11171#endif
11172 11334
11173 /* Schedule the rx_mode command */ 11335 /* Schedule the rx_mode command */
11174 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { 11336 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -11280,7 +11442,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
11280#endif 11442#endif
11281 .ndo_setup_tc = bnx2x_setup_tc, 11443 .ndo_setup_tc = bnx2x_setup_tc,
11282 11444
11283#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 11445#ifdef NETDEV_FCOE_WWNN
11284 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, 11446 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
11285#endif 11447#endif
11286}; 11448};
@@ -11746,9 +11908,8 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
11746{ 11908{
11747 int cid_count = BNX2X_L2_MAX_CID(bp); 11909 int cid_count = BNX2X_L2_MAX_CID(bp);
11748 11910
11749#ifdef BCM_CNIC 11911 if (CNIC_SUPPORT(bp))
11750 cid_count += CNIC_CID_MAX; 11912 cid_count += CNIC_CID_MAX;
11751#endif
11752 return roundup(cid_count, QM_CID_ROUND); 11913 return roundup(cid_count, QM_CID_ROUND);
11753} 11914}
11754 11915
@@ -11758,7 +11919,8 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
11758 * @dev: pci device 11919 * @dev: pci device
11759 * 11920 *
11760 */ 11921 */
11761static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev) 11922static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
11923 int cnic_cnt)
11762{ 11924{
11763 int pos; 11925 int pos;
11764 u16 control; 11926 u16 control;
@@ -11770,7 +11932,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
11770 * one fast path queue: one FP queue + SB for CNIC 11932 * one fast path queue: one FP queue + SB for CNIC
11771 */ 11933 */
11772 if (!pos) 11934 if (!pos)
11773 return 1 + CNIC_PRESENT; 11935 return 1 + cnic_cnt;
11774 11936
11775 /* 11937 /*
11776 * The value in the PCI configuration space is the index of the last 11938 * The value in the PCI configuration space is the index of the last
@@ -11790,6 +11952,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11790 int pcie_width, pcie_speed; 11952 int pcie_width, pcie_speed;
11791 int rc, max_non_def_sbs; 11953 int rc, max_non_def_sbs;
11792 int rx_count, tx_count, rss_count, doorbell_size; 11954 int rx_count, tx_count, rss_count, doorbell_size;
11955 int cnic_cnt;
11793 /* 11956 /*
11794 * An estimated maximum supported CoS number according to the chip 11957 * An estimated maximum supported CoS number according to the chip
11795 * version. 11958 * version.
@@ -11833,21 +11996,22 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11833 return -ENODEV; 11996 return -ENODEV;
11834 } 11997 }
11835 11998
11836 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev); 11999 cnic_cnt = 1;
12000 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
11837 12001
11838 WARN_ON(!max_non_def_sbs); 12002 WARN_ON(!max_non_def_sbs);
11839 12003
11840 /* Maximum number of RSS queues: one IGU SB goes to CNIC */ 12004 /* Maximum number of RSS queues: one IGU SB goes to CNIC */
11841 rss_count = max_non_def_sbs - CNIC_PRESENT; 12005 rss_count = max_non_def_sbs - cnic_cnt;
11842 12006
11843 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ 12007 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
11844 rx_count = rss_count + FCOE_PRESENT; 12008 rx_count = rss_count + cnic_cnt;
11845 12009
11846 /* 12010 /*
11847 * Maximum number of netdev Tx queues: 12011 * Maximum number of netdev Tx queues:
11848 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 12012 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
11849 */ 12013 */
11850 tx_count = rss_count * max_cos_est + FCOE_PRESENT; 12014 tx_count = rss_count * max_cos_est + cnic_cnt;
11851 12015
11852 /* dev zeroed in init_etherdev */ 12016 /* dev zeroed in init_etherdev */
11853 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); 12017 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11858,6 +12022,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11858 12022
11859 bp->igu_sb_cnt = max_non_def_sbs; 12023 bp->igu_sb_cnt = max_non_def_sbs;
11860 bp->msg_enable = debug; 12024 bp->msg_enable = debug;
12025 bp->cnic_support = cnic_cnt;
12026
11861 pci_set_drvdata(pdev, dev); 12027 pci_set_drvdata(pdev, dev);
11862 12028
11863 rc = bnx2x_init_dev(pdev, dev, ent->driver_data); 12029 rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
@@ -11866,6 +12032,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11866 return rc; 12032 return rc;
11867 } 12033 }
11868 12034
12035 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
11869 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs); 12036 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
11870 12037
11871 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", 12038 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
@@ -11898,14 +12065,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11898 /* calc qm_cid_count */ 12065 /* calc qm_cid_count */
11899 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); 12066 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
11900 12067
11901#ifdef BCM_CNIC 12068 /* disable FCOE L2 queue for E1x*/
11902 /* disable FCOE L2 queue for E1x */
11903 if (CHIP_IS_E1x(bp)) 12069 if (CHIP_IS_E1x(bp))
11904 bp->flags |= NO_FCOE_FLAG; 12070 bp->flags |= NO_FCOE_FLAG;
11905 12071
11906#endif
11907
11908
11909 /* Set bp->num_queues for MSI-X mode*/ 12072 /* Set bp->num_queues for MSI-X mode*/
11910 bnx2x_set_num_queues(bp); 12073 bnx2x_set_num_queues(bp);
11911 12074
@@ -11920,14 +12083,13 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11920 goto init_one_exit; 12083 goto init_one_exit;
11921 } 12084 }
11922 12085
11923#ifdef BCM_CNIC 12086
11924 if (!NO_FCOE(bp)) { 12087 if (!NO_FCOE(bp)) {
11925 /* Add storage MAC address */ 12088 /* Add storage MAC address */
11926 rtnl_lock(); 12089 rtnl_lock();
11927 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 12090 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
11928 rtnl_unlock(); 12091 rtnl_unlock();
11929 } 12092 }
11930#endif
11931 12093
11932 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 12094 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
11933 12095
@@ -11972,14 +12134,12 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11972 } 12134 }
11973 bp = netdev_priv(dev); 12135 bp = netdev_priv(dev);
11974 12136
11975#ifdef BCM_CNIC
11976 /* Delete storage MAC address */ 12137 /* Delete storage MAC address */
11977 if (!NO_FCOE(bp)) { 12138 if (!NO_FCOE(bp)) {
11978 rtnl_lock(); 12139 rtnl_lock();
11979 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 12140 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
11980 rtnl_unlock(); 12141 rtnl_unlock();
11981 } 12142 }
11982#endif
11983 12143
11984#ifdef BCM_DCBNL 12144#ifdef BCM_DCBNL
11985 /* Delete app tlvs from dcbnl */ 12145 /* Delete app tlvs from dcbnl */
@@ -12027,15 +12187,17 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12027 12187
12028 bp->rx_mode = BNX2X_RX_MODE_NONE; 12188 bp->rx_mode = BNX2X_RX_MODE_NONE;
12029 12189
12030#ifdef BCM_CNIC 12190 if (CNIC_LOADED(bp))
12031 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 12191 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
12032#endif 12192
12033 /* Stop Tx */ 12193 /* Stop Tx */
12034 bnx2x_tx_disable(bp); 12194 bnx2x_tx_disable(bp);
12035 12195
12036 bnx2x_netif_stop(bp, 0); 12196 bnx2x_netif_stop(bp, 0);
12037 /* Delete all NAPI objects */ 12197 /* Delete all NAPI objects */
12038 bnx2x_del_all_napi(bp); 12198 bnx2x_del_all_napi(bp);
12199 if (CNIC_LOADED(bp))
12200 bnx2x_del_all_napi_cnic(bp);
12039 12201
12040 del_timer_sync(&bp->timer); 12202 del_timer_sync(&bp->timer);
12041 12203
@@ -12226,7 +12388,6 @@ void bnx2x_notify_link_changed(struct bnx2x *bp)
12226module_init(bnx2x_init); 12388module_init(bnx2x_init);
12227module_exit(bnx2x_cleanup); 12389module_exit(bnx2x_cleanup);
12228 12390
12229#ifdef BCM_CNIC
12230/** 12391/**
12231 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). 12392 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
12232 * 12393 *
@@ -12679,12 +12840,31 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12679{ 12840{
12680 struct bnx2x *bp = netdev_priv(dev); 12841 struct bnx2x *bp = netdev_priv(dev);
12681 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 12842 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12843 int rc;
12844
12845 DP(NETIF_MSG_IFUP, "Register_cnic called\n");
12682 12846
12683 if (ops == NULL) { 12847 if (ops == NULL) {
12684 BNX2X_ERR("NULL ops received\n"); 12848 BNX2X_ERR("NULL ops received\n");
12685 return -EINVAL; 12849 return -EINVAL;
12686 } 12850 }
12687 12851
12852 if (!CNIC_SUPPORT(bp)) {
12853 BNX2X_ERR("Can't register CNIC when not supported\n");
12854 return -EOPNOTSUPP;
12855 }
12856
12857 if (!CNIC_LOADED(bp)) {
12858 rc = bnx2x_load_cnic(bp);
12859 if (rc) {
12860 BNX2X_ERR("CNIC-related load failed\n");
12861 return rc;
12862 }
12863
12864 }
12865
12866 bp->cnic_enabled = true;
12867
12688 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); 12868 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12689 if (!bp->cnic_kwq) 12869 if (!bp->cnic_kwq)
12690 return -ENOMEM; 12870 return -ENOMEM;
@@ -12776,5 +12956,4 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12776} 12956}
12777EXPORT_SYMBOL(bnx2x_cnic_probe); 12957EXPORT_SYMBOL(bnx2x_cnic_probe);
12778 12958
12779#endif /* BCM_CNIC */
12780 12959
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 1b1999d34c71..7d93adb57f31 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2107,6 +2107,7 @@
2107#define NIG_REG_LLH1_ERROR_MASK 0x10090 2107#define NIG_REG_LLH1_ERROR_MASK 0x10090
2108/* [RW 8] event id for llh1 */ 2108/* [RW 8] event id for llh1 */
2109#define NIG_REG_LLH1_EVENT_ID 0x10088 2109#define NIG_REG_LLH1_EVENT_ID 0x10088
2110#define NIG_REG_LLH1_FUNC_EN 0x16104
2110#define NIG_REG_LLH1_FUNC_MEM 0x161c0 2111#define NIG_REG_LLH1_FUNC_MEM 0x161c0
2111#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160 2112#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
2112#define NIG_REG_LLH1_FUNC_MEM_SIZE 16 2113#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
@@ -2302,6 +2303,15 @@
2302 * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to 2303 * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
2303 * accommodate the 9 input clients to ETS arbiter. */ 2304 * accommodate the 9 input clients to ETS arbiter. */
2304#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684 2305#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
2306/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
2307 * packets to BRB LB interface to forward the packet to the host. All
2308 * packets from MCP are forwarded to the network when this bit is cleared -
2309 * regardless of the configured destination in tx_mng_destination register.
2310 * When MCP-to-host paths for both ports 0 and 1 are disabled - the arbiter
2311 * for BRB LB interface is bypassed and PBF LB traffic is always selected to
2312 * send to BRB LB.
2313 */
2314#define NIG_REG_P0_TX_MNG_HOST_ENABLE 0x182f4
2305#define NIG_REG_P1_HWPFC_ENABLE 0x181d0 2315#define NIG_REG_P1_HWPFC_ENABLE 0x181d0
2306#define NIG_REG_P1_MAC_IN_EN 0x185c0 2316#define NIG_REG_P1_MAC_IN_EN 0x185c0
2307/* [RW 1] Output enable for TX MAC interface */ 2317/* [RW 1] Output enable for TX MAC interface */
@@ -2418,6 +2428,12 @@
2418#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4 2428#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4
2419/* [R 1] TX FIFO for transmitting data to MAC is empty. */ 2429/* [R 1] TX FIFO for transmitting data to MAC is empty. */
2420#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594 2430#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594
2431/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
2432 * packets to BRB LB interface to forward the packet to the host. All
2433 * packets from MCP are forwarded to the network when this bit is cleared -
2434 * regardless of the configured destination in tx_mng_destination register.
2435 */
2436#define NIG_REG_P1_TX_MNG_HOST_ENABLE 0x182f8
2421/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets 2437/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
2422 forwarded to the host. */ 2438 forwarded to the host. */
2423#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8 2439#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 614981c02264..b8b4b749daab 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -5350,12 +5350,24 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
5350 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) && 5350 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5351 (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) 5351 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5352 next_state = BNX2X_F_STATE_STARTED; 5352 next_state = BNX2X_F_STATE_STARTED;
5353
5354 /* Switch_update ramrod can be sent in either started or
5355 * tx_stopped state, and it doesn't change the state.
5356 */
5357 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5358 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5359 next_state = BNX2X_F_STATE_STARTED;
5360
5353 else if (cmd == BNX2X_F_CMD_TX_STOP) 5361 else if (cmd == BNX2X_F_CMD_TX_STOP)
5354 next_state = BNX2X_F_STATE_TX_STOPPED; 5362 next_state = BNX2X_F_STATE_TX_STOPPED;
5355 5363
5356 break; 5364 break;
5357 case BNX2X_F_STATE_TX_STOPPED: 5365 case BNX2X_F_STATE_TX_STOPPED:
5358 if (cmd == BNX2X_F_CMD_TX_START) 5366 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5367 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5368 next_state = BNX2X_F_STATE_TX_STOPPED;
5369
5370 else if (cmd == BNX2X_F_CMD_TX_START)
5359 next_state = BNX2X_F_STATE_STARTED; 5371 next_state = BNX2X_F_STATE_STARTED;
5360 5372
5361 break; 5373 break;
@@ -5637,6 +5649,28 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
5637 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5649 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5638} 5650}
5639 5651
5652static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5653 struct bnx2x_func_state_params *params)
5654{
5655 struct bnx2x_func_sp_obj *o = params->f_obj;
5656 struct function_update_data *rdata =
5657 (struct function_update_data *)o->rdata;
5658 dma_addr_t data_mapping = o->rdata_mapping;
5659 struct bnx2x_func_switch_update_params *switch_update_params =
5660 &params->params.switch_update;
5661
5662 memset(rdata, 0, sizeof(*rdata));
5663
5664 /* Fill the ramrod data with provided parameters */
5665 rdata->tx_switch_suspend_change_flg = 1;
5666 rdata->tx_switch_suspend = switch_update_params->suspend;
5667 rdata->echo = SWITCH_UPDATE;
5668
5669 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5670 U64_HI(data_mapping),
5671 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5672}
5673
5640static inline int bnx2x_func_send_afex_update(struct bnx2x *bp, 5674static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5641 struct bnx2x_func_state_params *params) 5675 struct bnx2x_func_state_params *params)
5642{ 5676{
@@ -5657,6 +5691,7 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5657 cpu_to_le16(afex_update_params->afex_default_vlan); 5691 cpu_to_le16(afex_update_params->afex_default_vlan);
5658 rdata->allowed_priorities_change_flg = 1; 5692 rdata->allowed_priorities_change_flg = 1;
5659 rdata->allowed_priorities = afex_update_params->allowed_priorities; 5693 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5694 rdata->echo = AFEX_UPDATE;
5660 5695
5661 /* No need for an explicit memory barrier here as long we would 5696 /* No need for an explicit memory barrier here as long we would
5662 * need to ensure the ordering of writing to the SPQ element 5697 * need to ensure the ordering of writing to the SPQ element
@@ -5773,6 +5808,8 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
5773 return bnx2x_func_send_tx_stop(bp, params); 5808 return bnx2x_func_send_tx_stop(bp, params);
5774 case BNX2X_F_CMD_TX_START: 5809 case BNX2X_F_CMD_TX_START:
5775 return bnx2x_func_send_tx_start(bp, params); 5810 return bnx2x_func_send_tx_start(bp, params);
5811 case BNX2X_F_CMD_SWITCH_UPDATE:
5812 return bnx2x_func_send_switch_update(bp, params);
5776 default: 5813 default:
5777 BNX2X_ERR("Unknown command: %d\n", params->cmd); 5814 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5778 return -EINVAL; 5815 return -EINVAL;
@@ -5818,16 +5855,30 @@ int bnx2x_func_state_change(struct bnx2x *bp,
5818 struct bnx2x_func_state_params *params) 5855 struct bnx2x_func_state_params *params)
5819{ 5856{
5820 struct bnx2x_func_sp_obj *o = params->f_obj; 5857 struct bnx2x_func_sp_obj *o = params->f_obj;
5821 int rc; 5858 int rc, cnt = 300;
5822 enum bnx2x_func_cmd cmd = params->cmd; 5859 enum bnx2x_func_cmd cmd = params->cmd;
5823 unsigned long *pending = &o->pending; 5860 unsigned long *pending = &o->pending;
5824 5861
5825 mutex_lock(&o->one_pending_mutex); 5862 mutex_lock(&o->one_pending_mutex);
5826 5863
5827 /* Check that the requested transition is legal */ 5864 /* Check that the requested transition is legal */
5828 if (o->check_transition(bp, o, params)) { 5865 rc = o->check_transition(bp, o, params);
5866 if ((rc == -EBUSY) &&
5867 (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
5868 while ((rc == -EBUSY) && (--cnt > 0)) {
5869 mutex_unlock(&o->one_pending_mutex);
5870 msleep(10);
5871 mutex_lock(&o->one_pending_mutex);
5872 rc = o->check_transition(bp, o, params);
5873 }
5874 if (rc == -EBUSY) {
5875 mutex_unlock(&o->one_pending_mutex);
5876 BNX2X_ERR("timeout waiting for previous ramrod completion\n");
5877 return rc;
5878 }
5879 } else if (rc) {
5829 mutex_unlock(&o->one_pending_mutex); 5880 mutex_unlock(&o->one_pending_mutex);
5830 return -EINVAL; 5881 return rc;
5831 } 5882 }
5832 5883
5833 /* Set "pending" bit */ 5884 /* Set "pending" bit */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index acf2fe4ca608..adbd91b1bdfc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -40,6 +40,12 @@ enum {
40 * pending commands list. 40 * pending commands list.
41 */ 41 */
42 RAMROD_CONT, 42 RAMROD_CONT,
43 /* If there is another pending ramrod, wait until it finishes and
44 * re-try to submit this one. This flag can be set only in sleepable
45 * context, and should not be set from the context that completes the
46 * ramrods as deadlock will occur.
47 */
48 RAMROD_RETRY,
43}; 49};
44 50
45typedef enum { 51typedef enum {
@@ -1061,6 +1067,7 @@ enum bnx2x_func_cmd {
1061 BNX2X_F_CMD_AFEX_VIFLISTS, 1067 BNX2X_F_CMD_AFEX_VIFLISTS,
1062 BNX2X_F_CMD_TX_STOP, 1068 BNX2X_F_CMD_TX_STOP,
1063 BNX2X_F_CMD_TX_START, 1069 BNX2X_F_CMD_TX_START,
1070 BNX2X_F_CMD_SWITCH_UPDATE,
1064 BNX2X_F_CMD_MAX, 1071 BNX2X_F_CMD_MAX,
1065}; 1072};
1066 1073
@@ -1103,6 +1110,10 @@ struct bnx2x_func_start_params {
1103 u8 network_cos_mode; 1110 u8 network_cos_mode;
1104}; 1111};
1105 1112
1113struct bnx2x_func_switch_update_params {
1114 u8 suspend;
1115};
1116
1106struct bnx2x_func_afex_update_params { 1117struct bnx2x_func_afex_update_params {
1107 u16 vif_id; 1118 u16 vif_id;
1108 u16 afex_default_vlan; 1119 u16 afex_default_vlan;
@@ -1136,6 +1147,7 @@ struct bnx2x_func_state_params {
1136 struct bnx2x_func_hw_init_params hw_init; 1147 struct bnx2x_func_hw_init_params hw_init;
1137 struct bnx2x_func_hw_reset_params hw_reset; 1148 struct bnx2x_func_hw_reset_params hw_reset;
1138 struct bnx2x_func_start_params start; 1149 struct bnx2x_func_start_params start;
1150 struct bnx2x_func_switch_update_params switch_update;
1139 struct bnx2x_func_afex_update_params afex_update; 1151 struct bnx2x_func_afex_update_params afex_update;
1140 struct bnx2x_func_afex_viflists_params afex_viflists; 1152 struct bnx2x_func_afex_viflists_params afex_viflists;
1141 struct bnx2x_func_tx_start_params tx_start; 1153 struct bnx2x_func_tx_start_params tx_start;