author     Vladislav Zolotarov <vladz@broadcom.com>   2010-12-13 00:44:01 -0500
committer  David S. Miller <davem@davemloft.net>      2010-12-16 16:15:54 -0500
commit     ec6ba945211b1c1f97d3d19fe60f166c9a92241d (patch)
tree       ad9f313c0a644bf8d4d113f4605d778b6b100178 /drivers/net/bnx2x
parent     a3d22a68d752ccc1a01bb0a64dd70b7a98bf9e23 (diff)
bnx2x: add FCoE ring
Includes new driver structures and FW/HW configuration for FCoE ring
Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com>
Signed-off-by: Shmulik Ravid-Rabinovitz <shmulikr@broadcom.com>
Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
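The FCoE L2 ring is budgeted as one extra, non-ethernet queue slot placed right after the regular ethernet queues: bp->num_queues grows by NONE_ETH_CONTEXT_USE, BNX2X_NUM_ETH_QUEUES() subtracts it back out, and the new iteration macros skip the FCoE slot when FCoE is disabled. A minimal sketch of that layout, built only from macros this patch introduces (the helper itself is hypothetical and not part of the patch; it assumes BCM_CNIC is enabled):

static void bnx2x_show_queue_layout(struct bnx2x *bp)
{
	int i;

	/* Regular ethernet queues: fp[0] .. fp[BNX2X_NUM_ETH_QUEUES(bp) - 1] */
	for_each_eth_queue(bp, i)
		pr_info("fp[%d]: eth L2 queue\n", i);

	/* All queues; the skip_queue() check folded into the macro drops the
	 * FCoE slot (fp[FCOE_IDX]) when NO_FCOE(bp) is set.
	 */
	for_each_queue(bp, i)
		pr_info("fp[%d]: %s\n", i,
			IS_FCOE_IDX(i) ? "FCoE L2 queue" : "eth L2 queue");
}

With FCoE enabled, the FCoE slot is the last fastpath entry (FCOE_IDX == BNX2X_NUM_ETH_QUEUES(bp)) and rides on the default status block rather than a fastpath status block of its own.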
Diffstat (limited to 'drivers/net/bnx2x')
 -rw-r--r--  drivers/net/bnx2x/bnx2x.h          118
 -rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c       81
 -rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h       53
 -rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c  290
 -rw-r--r--  drivers/net/bnx2x/bnx2x_main.c     315
 -rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c     13
 -rw-r--r--  drivers/net/bnx2x/bnx2x_stats.h      2
 7 files changed, 631 insertions, 241 deletions
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 7e4d682f0df1..475725c566d7 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -13,6 +13,8 @@ | |||
13 | 13 | ||
14 | #ifndef BNX2X_H | 14 | #ifndef BNX2X_H |
15 | #define BNX2X_H | 15 | #define BNX2X_H |
16 | #include <linux/netdevice.h> | ||
17 | #include <linux/types.h> | ||
16 | 18 | ||
17 | /* compilation time flags */ | 19 | /* compilation time flags */ |
18 | 20 | ||
@@ -199,10 +201,25 @@ void bnx2x_panic_dump(struct bnx2x *bp); | |||
199 | /* EQ completions */ | 201 | /* EQ completions */ |
200 | #define HC_SP_INDEX_EQ_CONS 7 | 202 | #define HC_SP_INDEX_EQ_CONS 7 |
201 | 203 | ||
204 | /* FCoE L2 connection completions */ | ||
205 | #define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6 | ||
206 | #define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4 | ||
202 | /* iSCSI L2 */ | 207 | /* iSCSI L2 */ |
203 | #define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 | 208 | #define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 |
204 | #define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 | 209 | #define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 |
205 | 210 | ||
211 | /* Special clients parameters */ | ||
212 | |||
213 | /* SB indices */ | ||
214 | /* FCoE L2 */ | ||
215 | #define BNX2X_FCOE_L2_RX_INDEX \ | ||
216 | (&bp->def_status_blk->sp_sb.\ | ||
217 | index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS]) | ||
218 | |||
219 | #define BNX2X_FCOE_L2_TX_INDEX \ | ||
220 | (&bp->def_status_blk->sp_sb.\ | ||
221 | index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS]) | ||
222 | |||
206 | /** | 223 | /** |
207 | * CIDs and CLIDs: | 224 | * CIDs and CLIDs: |
208 | * CLIDs below is a CLID for func 0, then the CLID for other | 225 | * CLIDs below is a CLID for func 0, then the CLID for other |
@@ -215,12 +232,19 @@ void bnx2x_panic_dump(struct bnx2x *bp); | |||
215 | #define BNX2X_ISCSI_ETH_CL_ID 17 | 232 | #define BNX2X_ISCSI_ETH_CL_ID 17 |
216 | #define BNX2X_ISCSI_ETH_CID 17 | 233 | #define BNX2X_ISCSI_ETH_CID 17 |
217 | 234 | ||
235 | /* FCoE L2 */ | ||
236 | #define BNX2X_FCOE_ETH_CL_ID 18 | ||
237 | #define BNX2X_FCOE_ETH_CID 18 | ||
238 | |||
218 | /** Additional rings budgeting */ | 239 | /** Additional rings budgeting */ |
219 | #ifdef BCM_CNIC | 240 | #ifdef BCM_CNIC |
220 | #define CNIC_CONTEXT_USE 1 | 241 | #define CNIC_CONTEXT_USE 1 |
242 | #define FCOE_CONTEXT_USE 1 | ||
221 | #else | 243 | #else |
222 | #define CNIC_CONTEXT_USE 0 | 244 | #define CNIC_CONTEXT_USE 0 |
245 | #define FCOE_CONTEXT_USE 0 | ||
223 | #endif /* BCM_CNIC */ | 246 | #endif /* BCM_CNIC */ |
247 | #define NONE_ETH_CONTEXT_USE (FCOE_CONTEXT_USE) | ||
224 | 248 | ||
225 | #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ | 249 | #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ |
226 | AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR | 250 | AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |
@@ -401,6 +425,17 @@ struct bnx2x_fastpath { | |||
401 | }; | 425 | }; |
402 | 426 | ||
403 | #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) | 427 | #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) |
428 | #ifdef BCM_CNIC | ||
429 | /* FCoE L2 `fastpath' is right after the eth entries */ | ||
430 | #define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) | ||
431 | #define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) | ||
432 | #define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) | ||
433 | #define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) | ||
434 | #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) | ||
435 | #else | ||
436 | #define IS_FCOE_FP(fp) false | ||
437 | #define IS_FCOE_IDX(idx) false | ||
438 | #endif | ||
404 | 439 | ||
405 | 440 | ||
406 | /* MC hsi */ | 441 | /* MC hsi */ |
@@ -669,7 +704,9 @@ struct bnx2x_port { | |||
669 | enum { | 704 | enum { |
670 | CAM_ETH_LINE = 0, | 705 | CAM_ETH_LINE = 0, |
671 | CAM_ISCSI_ETH_LINE, | 706 | CAM_ISCSI_ETH_LINE, |
672 | CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE | 707 | CAM_FIP_ETH_LINE, |
708 | CAM_FIP_MCAST_LINE, | ||
709 | CAM_MAX_PF_LINE = CAM_FIP_MCAST_LINE | ||
673 | }; | 710 | }; |
674 | /* number of MACs per function in NIG memory - used for SI mode */ | 711 | /* number of MACs per function in NIG memory - used for SI mode */ |
675 | #define NIG_LLH_FUNC_MEM_SIZE 16 | 712 | #define NIG_LLH_FUNC_MEM_SIZE 16 |
@@ -714,6 +751,14 @@ enum { | |||
714 | */ | 751 | */ |
715 | #define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE) | 752 | #define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE) |
716 | 753 | ||
754 | /* | ||
755 | * The number of FP-SB allocated by the driver == max number of regular L2 | ||
756 | * queues + 1 for the CNIC which also consumes an FP-SB | ||
757 | */ | ||
758 | #define FP_SB_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE) | ||
759 | #define NUM_IGU_SB_REQUIRED(cid_cnt) \ | ||
760 | (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE) | ||
761 | |||
717 | union cdu_context { | 762 | union cdu_context { |
718 | struct eth_context eth; | 763 | struct eth_context eth; |
719 | char pad[1024]; | 764 | char pad[1024]; |
@@ -726,7 +771,8 @@ union cdu_context { | |||
726 | 771 | ||
727 | #ifdef BCM_CNIC | 772 | #ifdef BCM_CNIC |
728 | #define CNIC_ISCSI_CID_MAX 256 | 773 | #define CNIC_ISCSI_CID_MAX 256 |
729 | #define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX) | 774 | #define CNIC_FCOE_CID_MAX 2048 |
775 | #define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX) | ||
730 | #define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) | 776 | #define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) |
731 | #endif | 777 | #endif |
732 | 778 | ||
@@ -922,6 +968,10 @@ struct bnx2x { | |||
922 | #define DISABLE_MSI_FLAG 0x200 | 968 | #define DISABLE_MSI_FLAG 0x200 |
923 | #define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) | 969 | #define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) |
924 | #define MF_FUNC_DIS 0x1000 | 970 | #define MF_FUNC_DIS 0x1000 |
971 | #define FCOE_MACS_SET 0x2000 | ||
972 | #define NO_FCOE_FLAG 0x4000 | ||
973 | |||
974 | #define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) | ||
925 | 975 | ||
926 | int pf_num; /* absolute PF number */ | 976 | int pf_num; /* absolute PF number */ |
927 | int pfid; /* per-path PF number */ | 977 | int pfid; /* per-path PF number */ |
@@ -1069,7 +1119,8 @@ struct bnx2x { | |||
1069 | u16 cnic_kwq_pending; | 1119 | u16 cnic_kwq_pending; |
1070 | u16 cnic_spq_pending; | 1120 | u16 cnic_spq_pending; |
1071 | struct mutex cnic_mutex; | 1121 | struct mutex cnic_mutex; |
1072 | u8 iscsi_mac[6]; | 1122 | u8 iscsi_mac[ETH_ALEN]; |
1123 | u8 fip_mac[ETH_ALEN]; | ||
1073 | #endif | 1124 | #endif |
1074 | 1125 | ||
1075 | int dmae_ready; | 1126 | int dmae_ready; |
@@ -1159,10 +1210,17 @@ struct bnx2x { | |||
1159 | #define RSS_IPV6_TCP_CAP 0x0008 | 1210 | #define RSS_IPV6_TCP_CAP 0x0008 |
1160 | 1211 | ||
1161 | #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) | 1212 | #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) |
1213 | #define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE) | ||
1214 | |||
1215 | /* ethtool statistics are displayed for all regular ethernet queues and the | ||
1216 | * fcoe L2 queue if not disabled | ||
1217 | */ | ||
1218 | #define BNX2X_NUM_STAT_QUEUES(bp) (NO_FCOE(bp) ? BNX2X_NUM_ETH_QUEUES(bp) : \ | ||
1219 | (BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE)) | ||
1220 | |||
1162 | #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) | 1221 | #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) |
1163 | 1222 | ||
1164 | #define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE) | 1223 | #define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE) |
1165 | #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) | ||
1166 | 1224 | ||
1167 | #define RSS_IPV4_CAP_MASK \ | 1225 | #define RSS_IPV4_CAP_MASK \ |
1168 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | 1226 | TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY |
@@ -1255,6 +1313,7 @@ struct bnx2x_client_ramrod_params { | |||
1255 | u16 cl_id; | 1313 | u16 cl_id; |
1256 | u32 cid; | 1314 | u32 cid; |
1257 | u8 poll; | 1315 | u8 poll; |
1316 | #define CLIENT_IS_FCOE 0x01 | ||
1258 | #define CLIENT_IS_LEADING_RSS 0x02 | 1317 | #define CLIENT_IS_LEADING_RSS 0x02 |
1259 | u8 flags; | 1318 | u8 flags; |
1260 | }; | 1319 | }; |
@@ -1287,11 +1346,54 @@ struct bnx2x_func_init_params { | |||
1287 | u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ | 1346 | u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ |
1288 | }; | 1347 | }; |
1289 | 1348 | ||
1349 | #define for_each_eth_queue(bp, var) \ | ||
1350 | for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++) | ||
1351 | |||
1352 | #define for_each_nondefault_eth_queue(bp, var) \ | ||
1353 | for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++) | ||
1354 | |||
1355 | #define for_each_napi_queue(bp, var) \ | ||
1356 | for (var = 0; \ | ||
1357 | var < BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE; var++) \ | ||
1358 | if (skip_queue(bp, var)) \ | ||
1359 | continue; \ | ||
1360 | else | ||
1361 | |||
1290 | #define for_each_queue(bp, var) \ | 1362 | #define for_each_queue(bp, var) \ |
1291 | for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) | 1363 | for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \ |
1364 | if (skip_queue(bp, var)) \ | ||
1365 | continue; \ | ||
1366 | else | ||
1367 | |||
1368 | #define for_each_rx_queue(bp, var) \ | ||
1369 | for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \ | ||
1370 | if (skip_rx_queue(bp, var)) \ | ||
1371 | continue; \ | ||
1372 | else | ||
1373 | |||
1374 | #define for_each_tx_queue(bp, var) \ | ||
1375 | for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \ | ||
1376 | if (skip_tx_queue(bp, var)) \ | ||
1377 | continue; \ | ||
1378 | else | ||
1379 | |||
1292 | #define for_each_nondefault_queue(bp, var) \ | 1380 | #define for_each_nondefault_queue(bp, var) \ |
1293 | for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) | 1381 | for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) \ |
1382 | if (skip_queue(bp, var)) \ | ||
1383 | continue; \ | ||
1384 | else | ||
1294 | 1385 | ||
1386 | /* skip rx queue | ||
1387 | * if FCOE l2 support is diabled and this is the fcoe L2 queue | ||
1388 | */ | ||
1389 | #define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) | ||
1390 | |||
1391 | /* skip tx queue | ||
1392 | * if FCOE l2 support is disabled and this is the fcoe L2 queue | ||
1393 | */ | ||
1394 | #define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) | ||
1395 | |||
1396 | #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) | ||
1295 | 1397 | ||
1296 | #define WAIT_RAMROD_POLL 0x01 | 1398 | #define WAIT_RAMROD_POLL 0x01 |
1297 | #define WAIT_RAMROD_COMMON 0x02 | 1399 | #define WAIT_RAMROD_COMMON 0x02 |
@@ -1615,10 +1717,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1615 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \ | 1717 | MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \ |
1616 | (T_ETH_MAC_COMMAND_INVALIDATE)) | 1718 | (T_ETH_MAC_COMMAND_INVALIDATE)) |
1617 | 1719 | ||
1618 | #define CAM_INVALIDATE(x) \ | ||
1619 | (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE) | ||
1620 | |||
1621 | |||
1622 | /* Number of u32 elements in MC hash array */ | 1720 | /* Number of u32 elements in MC hash array */ |
1623 | #define MC_HASH_SIZE 8 | 1721 | #define MC_HASH_SIZE 8 |
1624 | #define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \ | 1722 | #define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \ |
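The accessors defined in this header address the FCoE slot directly. A small, purely illustrative helper (assuming BCM_CNIC; the function name and the WARN_ON are invented for this sketch and do not exist in the driver):

static bool bnx2x_fcoe_ring_active(struct bnx2x *bp)
{
	/* bnx2x_fcoe_fp(bp) is &bp->fp[FCOE_IDX]; bnx2x_fcoe(bp, field) is
	 * shorthand for bnx2x_fcoe_fp(bp)->field.
	 */
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

	if (NO_FCOE(bp))
		return false;

	/* IS_FCOE_FP() keys off fp->index, which the driver sets to FCOE_IDX */
	WARN_ON(!IS_FCOE_FP(fp));

	return fp->state != BNX2X_FP_STATE_CLOSED;
}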
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 236c00c3f568..fa12365faec2 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -827,7 +827,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
827 | DP(NETIF_MSG_IFUP, | 827 | DP(NETIF_MSG_IFUP, |
828 | "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size); | 828 | "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size); |
829 | 829 | ||
830 | for_each_queue(bp, j) { | 830 | for_each_rx_queue(bp, j) { |
831 | struct bnx2x_fastpath *fp = &bp->fp[j]; | 831 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
832 | 832 | ||
833 | if (!fp->disable_tpa) { | 833 | if (!fp->disable_tpa) { |
@@ -880,7 +880,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
880 | } | 880 | } |
881 | } | 881 | } |
882 | 882 | ||
883 | for_each_queue(bp, j) { | 883 | for_each_rx_queue(bp, j) { |
884 | struct bnx2x_fastpath *fp = &bp->fp[j]; | 884 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
885 | 885 | ||
886 | fp->rx_bd_cons = 0; | 886 | fp->rx_bd_cons = 0; |
@@ -911,7 +911,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp) | |||
911 | { | 911 | { |
912 | int i; | 912 | int i; |
913 | 913 | ||
914 | for_each_queue(bp, i) { | 914 | for_each_tx_queue(bp, i) { |
915 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 915 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
916 | 916 | ||
917 | u16 bd_cons = fp->tx_bd_cons; | 917 | u16 bd_cons = fp->tx_bd_cons; |
@@ -929,7 +929,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) | |||
929 | { | 929 | { |
930 | int i, j; | 930 | int i, j; |
931 | 931 | ||
932 | for_each_queue(bp, j) { | 932 | for_each_rx_queue(bp, j) { |
933 | struct bnx2x_fastpath *fp = &bp->fp[j]; | 933 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
934 | 934 | ||
935 | for (i = 0; i < NUM_RX_BD; i++) { | 935 | for (i = 0; i < NUM_RX_BD; i++) { |
@@ -970,7 +970,7 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp) | |||
970 | #ifdef BCM_CNIC | 970 | #ifdef BCM_CNIC |
971 | offset++; | 971 | offset++; |
972 | #endif | 972 | #endif |
973 | for_each_queue(bp, i) { | 973 | for_each_eth_queue(bp, i) { |
974 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " | 974 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " |
975 | "state %x\n", i, bp->msix_table[i + offset].vector, | 975 | "state %x\n", i, bp->msix_table[i + offset].vector, |
976 | bnx2x_fp(bp, i, state)); | 976 | bnx2x_fp(bp, i, state)); |
@@ -1004,14 +1004,14 @@ int bnx2x_enable_msix(struct bnx2x *bp) | |||
1004 | bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); | 1004 | bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); |
1005 | msix_vec++; | 1005 | msix_vec++; |
1006 | #endif | 1006 | #endif |
1007 | for_each_queue(bp, i) { | 1007 | for_each_eth_queue(bp, i) { |
1008 | bp->msix_table[msix_vec].entry = msix_vec; | 1008 | bp->msix_table[msix_vec].entry = msix_vec; |
1009 | DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " | 1009 | DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " |
1010 | "(fastpath #%u)\n", msix_vec, msix_vec, i); | 1010 | "(fastpath #%u)\n", msix_vec, msix_vec, i); |
1011 | msix_vec++; | 1011 | msix_vec++; |
1012 | } | 1012 | } |
1013 | 1013 | ||
1014 | req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1; | 1014 | req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1; |
1015 | 1015 | ||
1016 | rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); | 1016 | rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); |
1017 | 1017 | ||
@@ -1067,7 +1067,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) | |||
1067 | #ifdef BCM_CNIC | 1067 | #ifdef BCM_CNIC |
1068 | offset++; | 1068 | offset++; |
1069 | #endif | 1069 | #endif |
1070 | for_each_queue(bp, i) { | 1070 | for_each_eth_queue(bp, i) { |
1071 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 1071 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
1072 | snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", | 1072 | snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", |
1073 | bp->dev->name, i); | 1073 | bp->dev->name, i); |
@@ -1084,7 +1084,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) | |||
1084 | fp->state = BNX2X_FP_STATE_IRQ; | 1084 | fp->state = BNX2X_FP_STATE_IRQ; |
1085 | } | 1085 | } |
1086 | 1086 | ||
1087 | i = BNX2X_NUM_QUEUES(bp); | 1087 | i = BNX2X_NUM_ETH_QUEUES(bp); |
1088 | offset = 1 + CNIC_CONTEXT_USE; | 1088 | offset = 1 + CNIC_CONTEXT_USE; |
1089 | netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" | 1089 | netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" |
1090 | " ... fp[%d] %d\n", | 1090 | " ... fp[%d] %d\n", |
@@ -1131,7 +1131,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp) | |||
1131 | { | 1131 | { |
1132 | int i; | 1132 | int i; |
1133 | 1133 | ||
1134 | for_each_queue(bp, i) | 1134 | for_each_napi_queue(bp, i) |
1135 | napi_enable(&bnx2x_fp(bp, i, napi)); | 1135 | napi_enable(&bnx2x_fp(bp, i, napi)); |
1136 | } | 1136 | } |
1137 | 1137 | ||
@@ -1139,7 +1139,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp) | |||
1139 | { | 1139 | { |
1140 | int i; | 1140 | int i; |
1141 | 1141 | ||
1142 | for_each_queue(bp, i) | 1142 | for_each_napi_queue(bp, i) |
1143 | napi_disable(&bnx2x_fp(bp, i, napi)); | 1143 | napi_disable(&bnx2x_fp(bp, i, napi)); |
1144 | } | 1144 | } |
1145 | 1145 | ||
@@ -1181,7 +1181,22 @@ void bnx2x_set_num_queues(struct bnx2x *bp) | |||
1181 | bp->num_queues = 1; | 1181 | bp->num_queues = 1; |
1182 | break; | 1182 | break; |
1183 | } | 1183 | } |
1184 | |||
1185 | /* Add special queues */ | ||
1186 | bp->num_queues += NONE_ETH_CONTEXT_USE; | ||
1187 | } | ||
1188 | |||
1189 | #ifdef BCM_CNIC | ||
1190 | static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp) | ||
1191 | { | ||
1192 | if (!NO_FCOE(bp)) { | ||
1193 | if (!IS_MF_SD(bp)) | ||
1194 | bnx2x_set_fip_eth_mac_addr(bp, 1); | ||
1195 | bnx2x_set_all_enode_macs(bp, 1); | ||
1196 | bp->flags |= FCOE_MACS_SET; | ||
1197 | } | ||
1184 | } | 1198 | } |
1199 | #endif | ||
1185 | 1200 | ||
1186 | static void bnx2x_release_firmware(struct bnx2x *bp) | 1201 | static void bnx2x_release_firmware(struct bnx2x *bp) |
1187 | { | 1202 | { |
@@ -1191,6 +1206,20 @@ static void bnx2x_release_firmware(struct bnx2x *bp) | |||
1191 | release_firmware(bp->firmware); | 1206 | release_firmware(bp->firmware); |
1192 | } | 1207 | } |
1193 | 1208 | ||
1209 | static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) | ||
1210 | { | ||
1211 | int rc, num = bp->num_queues; | ||
1212 | |||
1213 | #ifdef BCM_CNIC | ||
1214 | if (NO_FCOE(bp)) | ||
1215 | num -= FCOE_CONTEXT_USE; | ||
1216 | |||
1217 | #endif | ||
1218 | netif_set_real_num_tx_queues(bp->dev, num); | ||
1219 | rc = netif_set_real_num_rx_queues(bp->dev, num); | ||
1220 | return rc; | ||
1221 | } | ||
1222 | |||
1194 | /* must be called with rtnl_lock */ | 1223 | /* must be called with rtnl_lock */ |
1195 | int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | 1224 | int bnx2x_nic_load(struct bnx2x *bp, int load_mode) |
1196 | { | 1225 | { |
@@ -1217,10 +1246,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1217 | if (bnx2x_alloc_mem(bp)) | 1246 | if (bnx2x_alloc_mem(bp)) |
1218 | return -ENOMEM; | 1247 | return -ENOMEM; |
1219 | 1248 | ||
1220 | netif_set_real_num_tx_queues(bp->dev, bp->num_queues); | 1249 | rc = bnx2x_set_real_num_queues(bp); |
1221 | rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues); | ||
1222 | if (rc) { | 1250 | if (rc) { |
1223 | BNX2X_ERR("Unable to update real_num_rx_queues\n"); | 1251 | BNX2X_ERR("Unable to set real_num_queues\n"); |
1224 | goto load_error0; | 1252 | goto load_error0; |
1225 | } | 1253 | } |
1226 | 1254 | ||
@@ -1228,6 +1256,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1228 | bnx2x_fp(bp, i, disable_tpa) = | 1256 | bnx2x_fp(bp, i, disable_tpa) = |
1229 | ((bp->flags & TPA_ENABLE_FLAG) == 0); | 1257 | ((bp->flags & TPA_ENABLE_FLAG) == 0); |
1230 | 1258 | ||
1259 | #ifdef BCM_CNIC | ||
1260 | /* We don't want TPA on FCoE L2 ring */ | ||
1261 | bnx2x_fcoe(bp, disable_tpa) = 1; | ||
1262 | #endif | ||
1231 | bnx2x_napi_enable(bp); | 1263 | bnx2x_napi_enable(bp); |
1232 | 1264 | ||
1233 | /* Send LOAD_REQUEST command to MCP | 1265 | /* Send LOAD_REQUEST command to MCP |
@@ -1358,6 +1390,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1358 | /* Now when Clients are configured we are ready to work */ | 1390 | /* Now when Clients are configured we are ready to work */ |
1359 | bp->state = BNX2X_STATE_OPEN; | 1391 | bp->state = BNX2X_STATE_OPEN; |
1360 | 1392 | ||
1393 | #ifdef BCM_CNIC | ||
1394 | bnx2x_set_fcoe_eth_macs(bp); | ||
1395 | #endif | ||
1396 | |||
1361 | bnx2x_set_eth_mac(bp, 1); | 1397 | bnx2x_set_eth_mac(bp, 1); |
1362 | 1398 | ||
1363 | if (bp->port.pmf) | 1399 | if (bp->port.pmf) |
@@ -1416,7 +1452,7 @@ load_error3: | |||
1416 | 1452 | ||
1417 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 1453 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
1418 | bnx2x_free_skbs(bp); | 1454 | bnx2x_free_skbs(bp); |
1419 | for_each_queue(bp, i) | 1455 | for_each_rx_queue(bp, i) |
1420 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | 1456 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); |
1421 | 1457 | ||
1422 | /* Release IRQs */ | 1458 | /* Release IRQs */ |
@@ -1487,7 +1523,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
1487 | 1523 | ||
1488 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 1524 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
1489 | bnx2x_free_skbs(bp); | 1525 | bnx2x_free_skbs(bp); |
1490 | for_each_queue(bp, i) | 1526 | for_each_rx_queue(bp, i) |
1491 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | 1527 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); |
1492 | 1528 | ||
1493 | bnx2x_free_mem(bp); | 1529 | bnx2x_free_mem(bp); |
@@ -1591,6 +1627,17 @@ int bnx2x_poll(struct napi_struct *napi, int budget) | |||
1591 | 1627 | ||
1592 | /* Fall out from the NAPI loop if needed */ | 1628 | /* Fall out from the NAPI loop if needed */ |
1593 | if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { | 1629 | if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { |
1630 | #ifdef BCM_CNIC | ||
1631 | /* No need to update SB for FCoE L2 ring as long as | ||
1632 | * it's connected to the default SB and the SB | ||
1633 | * has been updated when NAPI was scheduled. | ||
1634 | */ | ||
1635 | if (IS_FCOE_FP(fp)) { | ||
1636 | napi_complete(napi); | ||
1637 | break; | ||
1638 | } | ||
1639 | #endif | ||
1640 | |||
1594 | bnx2x_update_fpsb_idx(fp); | 1641 | bnx2x_update_fpsb_idx(fp); |
1595 | /* bnx2x_has_rx_work() reads the status block, | 1642 | /* bnx2x_has_rx_work() reads the status block, |
1596 | * thus we need to ensure that status block indices | 1643 | * thus we need to ensure that status block indices |
@@ -2255,7 +2302,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) | |||
2255 | bp->fp = fp; | 2302 | bp->fp = fp; |
2256 | 2303 | ||
2257 | /* msix table */ | 2304 | /* msix table */ |
2258 | tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl), | 2305 | tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl), |
2259 | GFP_KERNEL); | 2306 | GFP_KERNEL); |
2260 | if (!tbl) | 2307 | if (!tbl) |
2261 | goto alloc_err; | 2308 | goto alloc_err; |
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index cb8f2a040a18..4bb011358ed9 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -242,6 +242,30 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); | |||
242 | */ | 242 | */ |
243 | void bnx2x_set_eth_mac(struct bnx2x *bp, int set); | 243 | void bnx2x_set_eth_mac(struct bnx2x *bp, int set); |
244 | 244 | ||
245 | #ifdef BCM_CNIC | ||
246 | /** | ||
247 | * Set/Clear FIP MAC(s) at the next entries in the CAM after the ETH | ||
248 | * MAC(s). This function will wait until the ramrod completion | ||
249 | * returns. | ||
250 | * | ||
251 | * @param bp driver handle | ||
252 | * @param set set or clear the CAM entry | ||
253 | * | ||
254 | * @return 0 on success, -ENODEV if ramrod doesn't return. | ||
255 | */ | ||
256 | int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set); | ||
257 | |||
258 | /** | ||
259 | * Set/Clear ALL_ENODE mcast MAC. | ||
260 | * | ||
261 | * @param bp | ||
262 | * @param set | ||
263 | * | ||
264 | * @return int | ||
265 | */ | ||
266 | int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set); | ||
267 | #endif | ||
268 | |||
245 | /** | 269 | /** |
246 | * Set MAC filtering configurations. | 270 | * Set MAC filtering configurations. |
247 | * | 271 | * |
@@ -695,7 +719,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) | |||
695 | int i; | 719 | int i; |
696 | 720 | ||
697 | /* Add NAPI objects */ | 721 | /* Add NAPI objects */ |
698 | for_each_queue(bp, i) | 722 | for_each_napi_queue(bp, i) |
699 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), | 723 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), |
700 | bnx2x_poll, BNX2X_NAPI_WEIGHT); | 724 | bnx2x_poll, BNX2X_NAPI_WEIGHT); |
701 | } | 725 | } |
@@ -704,7 +728,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp) | |||
704 | { | 728 | { |
705 | int i; | 729 | int i; |
706 | 730 | ||
707 | for_each_queue(bp, i) | 731 | for_each_napi_queue(bp, i) |
708 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | 732 | netif_napi_del(&bnx2x_fp(bp, i, napi)); |
709 | } | 733 | } |
710 | 734 | ||
@@ -870,7 +894,7 @@ static inline void bnx2x_init_tx_rings(struct bnx2x *bp) | |||
870 | { | 894 | { |
871 | int i, j; | 895 | int i, j; |
872 | 896 | ||
873 | for_each_queue(bp, j) { | 897 | for_each_tx_queue(bp, j) { |
874 | struct bnx2x_fastpath *fp = &bp->fp[j]; | 898 | struct bnx2x_fastpath *fp = &bp->fp[j]; |
875 | 899 | ||
876 | for (i = 1; i <= NUM_TX_RINGS; i++) { | 900 | for (i = 1; i <= NUM_TX_RINGS; i++) { |
@@ -949,7 +973,30 @@ static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) | |||
949 | } | 973 | } |
950 | } | 974 | } |
951 | 975 | ||
976 | #ifdef BCM_CNIC | ||
977 | static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) | ||
978 | { | ||
979 | bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID + | ||
980 | BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; | ||
981 | bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; | ||
982 | bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; | ||
983 | bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; | ||
984 | bnx2x_fcoe(bp, bp) = bp; | ||
985 | bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED; | ||
986 | bnx2x_fcoe(bp, index) = FCOE_IDX; | ||
987 | bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; | ||
988 | bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX; | ||
989 | /* qZone id equals to FW (per path) client id */ | ||
990 | bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) + | ||
991 | BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 : | ||
992 | ETH_MAX_RX_CLIENTS_E1H); | ||
993 | /* init shortcut */ | ||
994 | bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ? | ||
995 | USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) : | ||
996 | USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id); | ||
952 | 997 | ||
998 | } | ||
999 | #endif | ||
953 | 1000 | ||
954 | static inline void __storm_memset_struct(struct bnx2x *bp, | 1001 | static inline void __storm_memset_struct(struct bnx2x *bp, |
955 | u32 addr, size_t size, u32 *data) | 1002 | u32 addr, size_t size, u32 *data) |
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index bd94827e5e57..99c672d894ca 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -25,6 +25,143 @@ | |||
25 | #include "bnx2x_cmn.h" | 25 | #include "bnx2x_cmn.h" |
26 | #include "bnx2x_dump.h" | 26 | #include "bnx2x_dump.h" |
27 | 27 | ||
28 | /* Note: in the format strings below %s is replaced by the queue-name which is | ||
29 | * either its index or 'fcoe' for the fcoe queue. Make sure the format string | ||
30 | * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2 | ||
31 | */ | ||
32 | #define MAX_QUEUE_NAME_LEN 4 | ||
33 | static const struct { | ||
34 | long offset; | ||
35 | int size; | ||
36 | char string[ETH_GSTRING_LEN]; | ||
37 | } bnx2x_q_stats_arr[] = { | ||
38 | /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" }, | ||
39 | { Q_STATS_OFFSET32(error_bytes_received_hi), | ||
40 | 8, "[%s]: rx_error_bytes" }, | ||
41 | { Q_STATS_OFFSET32(total_unicast_packets_received_hi), | ||
42 | 8, "[%s]: rx_ucast_packets" }, | ||
43 | { Q_STATS_OFFSET32(total_multicast_packets_received_hi), | ||
44 | 8, "[%s]: rx_mcast_packets" }, | ||
45 | { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), | ||
46 | 8, "[%s]: rx_bcast_packets" }, | ||
47 | { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" }, | ||
48 | { Q_STATS_OFFSET32(rx_err_discard_pkt), | ||
49 | 4, "[%s]: rx_phy_ip_err_discards"}, | ||
50 | { Q_STATS_OFFSET32(rx_skb_alloc_failed), | ||
51 | 4, "[%s]: rx_skb_alloc_discard" }, | ||
52 | { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" }, | ||
53 | |||
54 | /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" }, | ||
55 | { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), | ||
56 | 8, "[%s]: tx_ucast_packets" }, | ||
57 | { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), | ||
58 | 8, "[%s]: tx_mcast_packets" }, | ||
59 | { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), | ||
60 | 8, "[%s]: tx_bcast_packets" } | ||
61 | }; | ||
62 | |||
63 | #define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr) | ||
64 | |||
65 | static const struct { | ||
66 | long offset; | ||
67 | int size; | ||
68 | u32 flags; | ||
69 | #define STATS_FLAGS_PORT 1 | ||
70 | #define STATS_FLAGS_FUNC 2 | ||
71 | #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) | ||
72 | char string[ETH_GSTRING_LEN]; | ||
73 | } bnx2x_stats_arr[] = { | ||
74 | /* 1 */ { STATS_OFFSET32(total_bytes_received_hi), | ||
75 | 8, STATS_FLAGS_BOTH, "rx_bytes" }, | ||
76 | { STATS_OFFSET32(error_bytes_received_hi), | ||
77 | 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, | ||
78 | { STATS_OFFSET32(total_unicast_packets_received_hi), | ||
79 | 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, | ||
80 | { STATS_OFFSET32(total_multicast_packets_received_hi), | ||
81 | 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, | ||
82 | { STATS_OFFSET32(total_broadcast_packets_received_hi), | ||
83 | 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, | ||
84 | { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), | ||
85 | 8, STATS_FLAGS_PORT, "rx_crc_errors" }, | ||
86 | { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), | ||
87 | 8, STATS_FLAGS_PORT, "rx_align_errors" }, | ||
88 | { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), | ||
89 | 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, | ||
90 | { STATS_OFFSET32(etherstatsoverrsizepkts_hi), | ||
91 | 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, | ||
92 | /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi), | ||
93 | 8, STATS_FLAGS_PORT, "rx_fragments" }, | ||
94 | { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), | ||
95 | 8, STATS_FLAGS_PORT, "rx_jabbers" }, | ||
96 | { STATS_OFFSET32(no_buff_discard_hi), | ||
97 | 8, STATS_FLAGS_BOTH, "rx_discards" }, | ||
98 | { STATS_OFFSET32(mac_filter_discard), | ||
99 | 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, | ||
100 | { STATS_OFFSET32(xxoverflow_discard), | ||
101 | 4, STATS_FLAGS_PORT, "rx_fw_discards" }, | ||
102 | { STATS_OFFSET32(brb_drop_hi), | ||
103 | 8, STATS_FLAGS_PORT, "rx_brb_discard" }, | ||
104 | { STATS_OFFSET32(brb_truncate_hi), | ||
105 | 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, | ||
106 | { STATS_OFFSET32(pause_frames_received_hi), | ||
107 | 8, STATS_FLAGS_PORT, "rx_pause_frames" }, | ||
108 | { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), | ||
109 | 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, | ||
110 | { STATS_OFFSET32(nig_timer_max), | ||
111 | 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, | ||
112 | /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt), | ||
113 | 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"}, | ||
114 | { STATS_OFFSET32(rx_skb_alloc_failed), | ||
115 | 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" }, | ||
116 | { STATS_OFFSET32(hw_csum_err), | ||
117 | 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" }, | ||
118 | |||
119 | { STATS_OFFSET32(total_bytes_transmitted_hi), | ||
120 | 8, STATS_FLAGS_BOTH, "tx_bytes" }, | ||
121 | { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), | ||
122 | 8, STATS_FLAGS_PORT, "tx_error_bytes" }, | ||
123 | { STATS_OFFSET32(total_unicast_packets_transmitted_hi), | ||
124 | 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, | ||
125 | { STATS_OFFSET32(total_multicast_packets_transmitted_hi), | ||
126 | 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, | ||
127 | { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), | ||
128 | 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, | ||
129 | { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), | ||
130 | 8, STATS_FLAGS_PORT, "tx_mac_errors" }, | ||
131 | { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), | ||
132 | 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, | ||
133 | /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), | ||
134 | 8, STATS_FLAGS_PORT, "tx_single_collisions" }, | ||
135 | { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), | ||
136 | 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, | ||
137 | { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), | ||
138 | 8, STATS_FLAGS_PORT, "tx_deferred" }, | ||
139 | { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), | ||
140 | 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, | ||
141 | { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), | ||
142 | 8, STATS_FLAGS_PORT, "tx_late_collisions" }, | ||
143 | { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), | ||
144 | 8, STATS_FLAGS_PORT, "tx_total_collisions" }, | ||
145 | { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), | ||
146 | 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, | ||
147 | { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), | ||
148 | 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, | ||
149 | { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), | ||
150 | 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, | ||
151 | { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), | ||
152 | 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, | ||
153 | /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), | ||
154 | 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, | ||
155 | { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), | ||
156 | 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, | ||
157 | { STATS_OFFSET32(etherstatspktsover1522octets_hi), | ||
158 | 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, | ||
159 | { STATS_OFFSET32(pause_frames_sent_hi), | ||
160 | 8, STATS_FLAGS_PORT, "tx_pause_frames" } | ||
161 | }; | ||
162 | |||
163 | #define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) | ||
164 | |||
28 | static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 165 | static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
29 | { | 166 | { |
30 | struct bnx2x *bp = netdev_priv(dev); | 167 | struct bnx2x *bp = netdev_priv(dev); |
@@ -1318,7 +1455,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) | |||
1318 | 1455 | ||
1319 | save_val = REG_RD(bp, offset); | 1456 | save_val = REG_RD(bp, offset); |
1320 | 1457 | ||
1321 | REG_WR(bp, offset, (wr_val & mask)); | 1458 | REG_WR(bp, offset, wr_val & mask); |
1322 | 1459 | ||
1323 | val = REG_RD(bp, offset); | 1460 | val = REG_RD(bp, offset); |
1324 | 1461 | ||
@@ -1689,7 +1826,7 @@ static int bnx2x_test_intr(struct bnx2x *bp) | |||
1689 | config->hdr.client_id = bp->fp->cl_id; | 1826 | config->hdr.client_id = bp->fp->cl_id; |
1690 | config->hdr.reserved1 = 0; | 1827 | config->hdr.reserved1 = 0; |
1691 | 1828 | ||
1692 | bp->set_mac_pending++; | 1829 | bp->set_mac_pending = 1; |
1693 | smp_wmb(); | 1830 | smp_wmb(); |
1694 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, | 1831 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, |
1695 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), | 1832 | U64_HI(bnx2x_sp_mapping(bp, mac_config)), |
@@ -1787,134 +1924,6 @@ static void bnx2x_self_test(struct net_device *dev, | |||
1787 | #endif | 1924 | #endif |
1788 | } | 1925 | } |
1789 | 1926 | ||
1790 | static const struct { | ||
1791 | long offset; | ||
1792 | int size; | ||
1793 | u8 string[ETH_GSTRING_LEN]; | ||
1794 | } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = { | ||
1795 | /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" }, | ||
1796 | { Q_STATS_OFFSET32(error_bytes_received_hi), | ||
1797 | 8, "[%d]: rx_error_bytes" }, | ||
1798 | { Q_STATS_OFFSET32(total_unicast_packets_received_hi), | ||
1799 | 8, "[%d]: rx_ucast_packets" }, | ||
1800 | { Q_STATS_OFFSET32(total_multicast_packets_received_hi), | ||
1801 | 8, "[%d]: rx_mcast_packets" }, | ||
1802 | { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), | ||
1803 | 8, "[%d]: rx_bcast_packets" }, | ||
1804 | { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" }, | ||
1805 | { Q_STATS_OFFSET32(rx_err_discard_pkt), | ||
1806 | 4, "[%d]: rx_phy_ip_err_discards"}, | ||
1807 | { Q_STATS_OFFSET32(rx_skb_alloc_failed), | ||
1808 | 4, "[%d]: rx_skb_alloc_discard" }, | ||
1809 | { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" }, | ||
1810 | |||
1811 | /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" }, | ||
1812 | { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), | ||
1813 | 8, "[%d]: tx_ucast_packets" }, | ||
1814 | { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), | ||
1815 | 8, "[%d]: tx_mcast_packets" }, | ||
1816 | { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), | ||
1817 | 8, "[%d]: tx_bcast_packets" } | ||
1818 | }; | ||
1819 | |||
1820 | static const struct { | ||
1821 | long offset; | ||
1822 | int size; | ||
1823 | u32 flags; | ||
1824 | #define STATS_FLAGS_PORT 1 | ||
1825 | #define STATS_FLAGS_FUNC 2 | ||
1826 | #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) | ||
1827 | u8 string[ETH_GSTRING_LEN]; | ||
1828 | } bnx2x_stats_arr[BNX2X_NUM_STATS] = { | ||
1829 | /* 1 */ { STATS_OFFSET32(total_bytes_received_hi), | ||
1830 | 8, STATS_FLAGS_BOTH, "rx_bytes" }, | ||
1831 | { STATS_OFFSET32(error_bytes_received_hi), | ||
1832 | 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, | ||
1833 | { STATS_OFFSET32(total_unicast_packets_received_hi), | ||
1834 | 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, | ||
1835 | { STATS_OFFSET32(total_multicast_packets_received_hi), | ||
1836 | 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, | ||
1837 | { STATS_OFFSET32(total_broadcast_packets_received_hi), | ||
1838 | 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, | ||
1839 | { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), | ||
1840 | 8, STATS_FLAGS_PORT, "rx_crc_errors" }, | ||
1841 | { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), | ||
1842 | 8, STATS_FLAGS_PORT, "rx_align_errors" }, | ||
1843 | { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), | ||
1844 | 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, | ||
1845 | { STATS_OFFSET32(etherstatsoverrsizepkts_hi), | ||
1846 | 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, | ||
1847 | /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi), | ||
1848 | 8, STATS_FLAGS_PORT, "rx_fragments" }, | ||
1849 | { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), | ||
1850 | 8, STATS_FLAGS_PORT, "rx_jabbers" }, | ||
1851 | { STATS_OFFSET32(no_buff_discard_hi), | ||
1852 | 8, STATS_FLAGS_BOTH, "rx_discards" }, | ||
1853 | { STATS_OFFSET32(mac_filter_discard), | ||
1854 | 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, | ||
1855 | { STATS_OFFSET32(xxoverflow_discard), | ||
1856 | 4, STATS_FLAGS_PORT, "rx_fw_discards" }, | ||
1857 | { STATS_OFFSET32(brb_drop_hi), | ||
1858 | 8, STATS_FLAGS_PORT, "rx_brb_discard" }, | ||
1859 | { STATS_OFFSET32(brb_truncate_hi), | ||
1860 | 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, | ||
1861 | { STATS_OFFSET32(pause_frames_received_hi), | ||
1862 | 8, STATS_FLAGS_PORT, "rx_pause_frames" }, | ||
1863 | { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), | ||
1864 | 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, | ||
1865 | { STATS_OFFSET32(nig_timer_max), | ||
1866 | 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, | ||
1867 | /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt), | ||
1868 | 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"}, | ||
1869 | { STATS_OFFSET32(rx_skb_alloc_failed), | ||
1870 | 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" }, | ||
1871 | { STATS_OFFSET32(hw_csum_err), | ||
1872 | 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" }, | ||
1873 | |||
1874 | { STATS_OFFSET32(total_bytes_transmitted_hi), | ||
1875 | 8, STATS_FLAGS_BOTH, "tx_bytes" }, | ||
1876 | { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), | ||
1877 | 8, STATS_FLAGS_PORT, "tx_error_bytes" }, | ||
1878 | { STATS_OFFSET32(total_unicast_packets_transmitted_hi), | ||
1879 | 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, | ||
1880 | { STATS_OFFSET32(total_multicast_packets_transmitted_hi), | ||
1881 | 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, | ||
1882 | { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), | ||
1883 | 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, | ||
1884 | { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), | ||
1885 | 8, STATS_FLAGS_PORT, "tx_mac_errors" }, | ||
1886 | { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), | ||
1887 | 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, | ||
1888 | /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), | ||
1889 | 8, STATS_FLAGS_PORT, "tx_single_collisions" }, | ||
1890 | { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), | ||
1891 | 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, | ||
1892 | { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), | ||
1893 | 8, STATS_FLAGS_PORT, "tx_deferred" }, | ||
1894 | { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), | ||
1895 | 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, | ||
1896 | { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), | ||
1897 | 8, STATS_FLAGS_PORT, "tx_late_collisions" }, | ||
1898 | { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), | ||
1899 | 8, STATS_FLAGS_PORT, "tx_total_collisions" }, | ||
1900 | { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), | ||
1901 | 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, | ||
1902 | { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), | ||
1903 | 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, | ||
1904 | { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), | ||
1905 | 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, | ||
1906 | { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), | ||
1907 | 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, | ||
1908 | /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), | ||
1909 | 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, | ||
1910 | { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), | ||
1911 | 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, | ||
1912 | { STATS_OFFSET32(etherstatspktsover1522octets_hi), | ||
1913 | 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, | ||
1914 | { STATS_OFFSET32(pause_frames_sent_hi), | ||
1915 | 8, STATS_FLAGS_PORT, "tx_pause_frames" } | ||
1916 | }; | ||
1917 | |||
1918 | #define IS_PORT_STAT(i) \ | 1927 | #define IS_PORT_STAT(i) \ |
1919 | ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) | 1928 | ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) |
1920 | #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) | 1929 | #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) |
@@ -1929,7 +1938,8 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) | |||
1929 | switch (stringset) { | 1938 | switch (stringset) { |
1930 | case ETH_SS_STATS: | 1939 | case ETH_SS_STATS: |
1931 | if (is_multi(bp)) { | 1940 | if (is_multi(bp)) { |
1932 | num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; | 1941 | num_stats = BNX2X_NUM_STAT_QUEUES(bp) * |
1942 | BNX2X_NUM_Q_STATS; | ||
1933 | if (!IS_MF_MODE_STAT(bp)) | 1943 | if (!IS_MF_MODE_STAT(bp)) |
1934 | num_stats += BNX2X_NUM_STATS; | 1944 | num_stats += BNX2X_NUM_STATS; |
1935 | } else { | 1945 | } else { |
@@ -1955,15 +1965,25 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | |||
1955 | { | 1965 | { |
1956 | struct bnx2x *bp = netdev_priv(dev); | 1966 | struct bnx2x *bp = netdev_priv(dev); |
1957 | int i, j, k; | 1967 | int i, j, k; |
1968 | char queue_name[MAX_QUEUE_NAME_LEN+1]; | ||
1958 | 1969 | ||
1959 | switch (stringset) { | 1970 | switch (stringset) { |
1960 | case ETH_SS_STATS: | 1971 | case ETH_SS_STATS: |
1961 | if (is_multi(bp)) { | 1972 | if (is_multi(bp)) { |
1962 | k = 0; | 1973 | k = 0; |
1963 | for_each_queue(bp, i) { | 1974 | for_each_napi_queue(bp, i) { |
1975 | memset(queue_name, 0, sizeof(queue_name)); | ||
1976 | |||
1977 | if (IS_FCOE_IDX(i)) | ||
1978 | sprintf(queue_name, "fcoe"); | ||
1979 | else | ||
1980 | sprintf(queue_name, "%d", i); | ||
1981 | |||
1964 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) | 1982 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) |
1965 | sprintf(buf + (k + j)*ETH_GSTRING_LEN, | 1983 | snprintf(buf + (k + j)*ETH_GSTRING_LEN, |
1966 | bnx2x_q_stats_arr[j].string, i); | 1984 | ETH_GSTRING_LEN, |
1985 | bnx2x_q_stats_arr[j].string, | ||
1986 | queue_name); | ||
1967 | k += BNX2X_NUM_Q_STATS; | 1987 | k += BNX2X_NUM_Q_STATS; |
1968 | } | 1988 | } |
1969 | if (IS_MF_MODE_STAT(bp)) | 1989 | if (IS_MF_MODE_STAT(bp)) |
@@ -1997,7 +2017,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, | |||
1997 | 2017 | ||
1998 | if (is_multi(bp)) { | 2018 | if (is_multi(bp)) { |
1999 | k = 0; | 2019 | k = 0; |
2000 | for_each_queue(bp, i) { | 2020 | for_each_napi_queue(bp, i) { |
2001 | hw_stats = (u32 *)&bp->fp[i].eth_q_stats; | 2021 | hw_stats = (u32 *)&bp->fp[i].eth_q_stats; |
2002 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { | 2022 | for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { |
2003 | if (bnx2x_q_stats_arr[j].size == 0) { | 2023 | if (bnx2x_q_stats_arr[j].size == 0) { |
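Because the queue-stat format strings now take a queue name ("%s") instead of a raw index, the strings exported to ethtool read "[0]: rx_bytes", "[1]: rx_bytes", ..., and "[fcoe]: rx_bytes" for the FCoE slot. A stripped-down sketch of that naming step (the helper is hypothetical; the real logic lives inline in bnx2x_get_strings() above):

static void bnx2x_fill_queue_stat_label(int i, int j, char *buf)
{
	char queue_name[MAX_QUEUE_NAME_LEN + 1];

	memset(queue_name, 0, sizeof(queue_name));
	if (IS_FCOE_IDX(i))
		sprintf(queue_name, "fcoe");
	else
		sprintf(queue_name, "%d", i);

	/* e.g. "[2]: rx_bytes" for eth queue 2, "[fcoe]: rx_bytes" for FCoE */
	snprintf(buf, ETH_GSTRING_LEN, bnx2x_q_stats_arr[j].string, queue_name);
}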
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 0068a1dbc064..e6e2746e8bfe 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -121,6 +121,10 @@ MODULE_PARM_DESC(debug, " Default debug msglevel"); | |||
121 | 121 | ||
122 | static struct workqueue_struct *bnx2x_wq; | 122 | static struct workqueue_struct *bnx2x_wq; |
123 | 123 | ||
124 | #ifdef BCM_CNIC | ||
125 | static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01}; | ||
126 | #endif | ||
127 | |||
124 | enum bnx2x_board_type { | 128 | enum bnx2x_board_type { |
125 | BCM57710 = 0, | 129 | BCM57710 = 0, |
126 | BCM57711 = 1, | 130 | BCM57711 = 1, |
@@ -921,7 +925,7 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
921 | sp_sb_data.p_func.vf_valid); | 925 | sp_sb_data.p_func.vf_valid); |
922 | 926 | ||
923 | 927 | ||
924 | for_each_queue(bp, i) { | 928 | for_each_eth_queue(bp, i) { |
925 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 929 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
926 | int loop; | 930 | int loop; |
927 | struct hc_status_block_data_e2 sb_data_e2; | 931 | struct hc_status_block_data_e2 sb_data_e2; |
@@ -961,6 +965,10 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
961 | 965 | ||
962 | /* host sb data */ | 966 | /* host sb data */ |
963 | 967 | ||
968 | #ifdef BCM_CNIC | ||
969 | if (IS_FCOE_FP(fp)) | ||
970 | continue; | ||
971 | #endif | ||
964 | BNX2X_ERR(" run indexes ("); | 972 | BNX2X_ERR(" run indexes ("); |
965 | for (j = 0; j < HC_SB_MAX_SM; j++) | 973 | for (j = 0; j < HC_SB_MAX_SM; j++) |
966 | pr_cont("0x%x%s", | 974 | pr_cont("0x%x%s", |
@@ -1029,7 +1037,7 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
1029 | #ifdef BNX2X_STOP_ON_ERROR | 1037 | #ifdef BNX2X_STOP_ON_ERROR |
1030 | /* Rings */ | 1038 | /* Rings */ |
1031 | /* Rx */ | 1039 | /* Rx */ |
1032 | for_each_queue(bp, i) { | 1040 | for_each_rx_queue(bp, i) { |
1033 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 1041 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
1034 | 1042 | ||
1035 | start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); | 1043 | start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); |
@@ -1063,7 +1071,7 @@ void bnx2x_panic_dump(struct bnx2x *bp) | |||
1063 | } | 1071 | } |
1064 | 1072 | ||
1065 | /* Tx */ | 1073 | /* Tx */ |
1066 | for_each_queue(bp, i) { | 1074 | for_each_tx_queue(bp, i) { |
1067 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 1075 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
1068 | 1076 | ||
1069 | start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); | 1077 | start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); |
@@ -1298,7 +1306,7 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) | |||
1298 | #ifdef BCM_CNIC | 1306 | #ifdef BCM_CNIC |
1299 | offset++; | 1307 | offset++; |
1300 | #endif | 1308 | #endif |
1301 | for_each_queue(bp, i) | 1309 | for_each_eth_queue(bp, i) |
1302 | synchronize_irq(bp->msix_table[i + offset].vector); | 1310 | synchronize_irq(bp->msix_table[i + offset].vector); |
1303 | } else | 1311 | } else |
1304 | synchronize_irq(bp->pdev->irq); | 1312 | synchronize_irq(bp->pdev->irq); |
@@ -1420,7 +1428,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1420 | return IRQ_HANDLED; | 1428 | return IRQ_HANDLED; |
1421 | #endif | 1429 | #endif |
1422 | 1430 | ||
1423 | for_each_queue(bp, i) { | 1431 | for_each_eth_queue(bp, i) { |
1424 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 1432 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
1425 | 1433 | ||
1426 | mask = 0x2 << (fp->index + CNIC_CONTEXT_USE); | 1434 | mask = 0x2 << (fp->index + CNIC_CONTEXT_USE); |
@@ -2253,6 +2261,15 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) | |||
2253 | return rc; | 2261 | return rc; |
2254 | } | 2262 | } |
2255 | 2263 | ||
2264 | static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp) | ||
2265 | { | ||
2266 | #ifdef BCM_CNIC | ||
2267 | if (IS_FCOE_FP(fp) && IS_MF(bp)) | ||
2268 | return false; | ||
2269 | #endif | ||
2270 | return true; | ||
2271 | } | ||
2272 | |||
2256 | /* must be called under rtnl_lock */ | 2273 | /* must be called under rtnl_lock */ |
2257 | static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters) | 2274 | static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters) |
2258 | { | 2275 | { |
@@ -2411,7 +2428,8 @@ static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp, | |||
2411 | if (!fp->disable_tpa) | 2428 | if (!fp->disable_tpa) |
2412 | flags |= QUEUE_FLG_TPA; | 2429 | flags |= QUEUE_FLG_TPA; |
2413 | 2430 | ||
2414 | flags |= QUEUE_FLG_STATS; | 2431 | flags = stat_counter_valid(bp, fp) ? |
2432 | (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS); | ||
2415 | 2433 | ||
2416 | return flags; | 2434 | return flags; |
2417 | } | 2435 | } |
@@ -2471,7 +2489,10 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp, | |||
2471 | rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; | 2489 | rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; |
2472 | rxq_init->fw_sb_id = fp->fw_sb_id; | 2490 | rxq_init->fw_sb_id = fp->fw_sb_id; |
2473 | 2491 | ||
2474 | rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX; | 2492 | if (IS_FCOE_FP(fp)) |
2493 | rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; | ||
2494 | else | ||
2495 | rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX; | ||
2475 | 2496 | ||
2476 | rxq_init->cid = HW_CID(bp, fp->cid); | 2497 | rxq_init->cid = HW_CID(bp, fp->cid); |
2477 | 2498 | ||
@@ -2491,6 +2512,12 @@ static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp, | |||
2491 | txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX; | 2512 | txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX; |
2492 | txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; | 2513 | txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; |
2493 | txq_init->fw_sb_id = fp->fw_sb_id; | 2514 | txq_init->fw_sb_id = fp->fw_sb_id; |
2515 | |||
2516 | if (IS_FCOE_FP(fp)) { | ||
2517 | txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; | ||
2518 | txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; | ||
2519 | } | ||
2520 | |||
2494 | txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0; | 2521 | txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0; |
2495 | } | 2522 | } |
2496 | 2523 | ||
@@ -3689,8 +3716,11 @@ static void bnx2x_eq_int(struct bnx2x *bp) | |||
3689 | #ifdef BCM_CNIC | 3716 | #ifdef BCM_CNIC |
3690 | if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) | 3717 | if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) |
3691 | goto next_spqe; | 3718 | goto next_spqe; |
3719 | if (cid == BNX2X_FCOE_ETH_CID) | ||
3720 | bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED; | ||
3721 | else | ||
3692 | #endif | 3722 | #endif |
3693 | bnx2x_fp(bp, cid, state) = | 3723 | bnx2x_fp(bp, cid, state) = |
3694 | BNX2X_FP_STATE_CLOSED; | 3724 | BNX2X_FP_STATE_CLOSED; |
3695 | 3725 | ||
3696 | goto next_spqe; | 3726 | goto next_spqe; |
@@ -3766,7 +3796,13 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
3766 | 3796 | ||
3767 | /* SP events: STAT_QUERY and others */ | 3797 | /* SP events: STAT_QUERY and others */ |
3768 | if (status & BNX2X_DEF_SB_IDX) { | 3798 | if (status & BNX2X_DEF_SB_IDX) { |
3799 | #ifdef BCM_CNIC | ||
3800 | struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); | ||
3769 | 3801 | ||
3802 | if ((!NO_FCOE(bp)) && | ||
3803 | (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) | ||
3804 | napi_schedule(&bnx2x_fcoe(bp, napi)); | ||
3805 | #endif | ||
3770 | /* Handle EQ completions */ | 3806 | /* Handle EQ completions */ |
3771 | bnx2x_eq_int(bp); | 3807 | bnx2x_eq_int(bp); |
3772 | 3808 | ||
@@ -4149,7 +4185,7 @@ void bnx2x_update_coalesce(struct bnx2x *bp) | |||
4149 | { | 4185 | { |
4150 | int i; | 4186 | int i; |
4151 | 4187 | ||
4152 | for_each_queue(bp, i) | 4188 | for_each_eth_queue(bp, i) |
4153 | bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, | 4189 | bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, |
4154 | bp->rx_ticks, bp->tx_ticks); | 4190 | bp->rx_ticks, bp->tx_ticks); |
4155 | } | 4191 | } |
@@ -4197,13 +4233,16 @@ static void bnx2x_init_ind_table(struct bnx2x *bp) | |||
4197 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) | 4233 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) |
4198 | REG_WR8(bp, BAR_TSTRORM_INTMEM + | 4234 | REG_WR8(bp, BAR_TSTRORM_INTMEM + |
4199 | TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, | 4235 | TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, |
4200 | bp->fp->cl_id + (i % bp->num_queues)); | 4236 | bp->fp->cl_id + (i % (bp->num_queues - |
4237 | NONE_ETH_CONTEXT_USE))); | ||
4201 | } | 4238 | } |
4202 | 4239 | ||
4203 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | 4240 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp) |
4204 | { | 4241 | { |
4205 | int mode = bp->rx_mode; | 4242 | int mode = bp->rx_mode; |
4243 | int port = BP_PORT(bp); | ||
4206 | u16 cl_id; | 4244 | u16 cl_id; |
4245 | u32 def_q_filters = 0; | ||
4207 | 4246 | ||
4208 | /* All but management unicast packets should pass to the host as well */ | 4247 | /* All but management unicast packets should pass to the host as well */ |
4209 | u32 llh_mask = | 4248 | u32 llh_mask = |
@@ -4214,30 +4253,42 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
4214 | 4253 | ||
4215 | switch (mode) { | 4254 | switch (mode) { |
4216 | case BNX2X_RX_MODE_NONE: /* no Rx */ | 4255 | case BNX2X_RX_MODE_NONE: /* no Rx */ |
4217 | cl_id = BP_L_ID(bp); | 4256 | def_q_filters = BNX2X_ACCEPT_NONE; |
4218 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); | 4257 | #ifdef BCM_CNIC |
4258 | if (!NO_FCOE(bp)) { | ||
4259 | cl_id = bnx2x_fcoe(bp, cl_id); | ||
4260 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); | ||
4261 | } | ||
4262 | #endif | ||
4219 | break; | 4263 | break; |
4220 | 4264 | ||
4221 | case BNX2X_RX_MODE_NORMAL: | 4265 | case BNX2X_RX_MODE_NORMAL: |
4222 | cl_id = BP_L_ID(bp); | 4266 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | |
4223 | bnx2x_rxq_set_mac_filters(bp, cl_id, | 4267 | BNX2X_ACCEPT_MULTICAST; |
4224 | BNX2X_ACCEPT_UNICAST | | 4268 | #ifdef BCM_CNIC |
4225 | BNX2X_ACCEPT_BROADCAST | | 4269 | cl_id = bnx2x_fcoe(bp, cl_id); |
4226 | BNX2X_ACCEPT_MULTICAST); | 4270 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | |
4271 | BNX2X_ACCEPT_MULTICAST); | ||
4272 | #endif | ||
4227 | break; | 4273 | break; |
4228 | 4274 | ||
4229 | case BNX2X_RX_MODE_ALLMULTI: | 4275 | case BNX2X_RX_MODE_ALLMULTI: |
4230 | cl_id = BP_L_ID(bp); | 4276 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | |
4231 | bnx2x_rxq_set_mac_filters(bp, cl_id, | 4277 | BNX2X_ACCEPT_ALL_MULTICAST; |
4232 | BNX2X_ACCEPT_UNICAST | | 4278 | #ifdef BCM_CNIC |
4233 | BNX2X_ACCEPT_BROADCAST | | 4279 | cl_id = bnx2x_fcoe(bp, cl_id); |
4234 | BNX2X_ACCEPT_ALL_MULTICAST); | 4280 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | |
4281 | BNX2X_ACCEPT_MULTICAST); | ||
4282 | #endif | ||
4235 | break; | 4283 | break; |
4236 | 4284 | ||
4237 | case BNX2X_RX_MODE_PROMISC: | 4285 | case BNX2X_RX_MODE_PROMISC: |
4238 | cl_id = BP_L_ID(bp); | 4286 | def_q_filters |= BNX2X_PROMISCUOUS_MODE; |
4239 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE); | 4287 | #ifdef BCM_CNIC |
4240 | 4288 | cl_id = bnx2x_fcoe(bp, cl_id); | |
4289 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | | ||
4290 | BNX2X_ACCEPT_MULTICAST); | ||
4291 | #endif | ||
4241 | /* pass management unicast packets as well */ | 4292 | /* pass management unicast packets as well */ |
4242 | llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; | 4293 | llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; |
4243 | break; | 4294 | break; |
@@ -4247,20 +4298,24 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
4247 | break; | 4298 | break; |
4248 | } | 4299 | } |
4249 | 4300 | ||
4301 | cl_id = BP_L_ID(bp); | ||
4302 | bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters); | ||
4303 | |||
4250 | REG_WR(bp, | 4304 | REG_WR(bp, |
4251 | BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK : | 4305 | (port ? NIG_REG_LLH1_BRB1_DRV_MASK : |
4252 | NIG_REG_LLH0_BRB1_DRV_MASK, | 4306 | NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask); |
4253 | llh_mask); | ||
4254 | 4307 | ||
4255 | DP(NETIF_MSG_IFUP, "rx mode %d\n" | 4308 | DP(NETIF_MSG_IFUP, "rx mode %d\n" |
4256 | "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n" | 4309 | "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n" |
4257 | "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode, | 4310 | "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n" |
4311 | "unmatched_ucast 0x%x\n", mode, | ||
4258 | bp->mac_filters.ucast_drop_all, | 4312 | bp->mac_filters.ucast_drop_all, |
4259 | bp->mac_filters.mcast_drop_all, | 4313 | bp->mac_filters.mcast_drop_all, |
4260 | bp->mac_filters.bcast_drop_all, | 4314 | bp->mac_filters.bcast_drop_all, |
4261 | bp->mac_filters.ucast_accept_all, | 4315 | bp->mac_filters.ucast_accept_all, |
4262 | bp->mac_filters.mcast_accept_all, | 4316 | bp->mac_filters.mcast_accept_all, |
4263 | bp->mac_filters.bcast_accept_all | 4317 | bp->mac_filters.bcast_accept_all, |
4318 | bp->mac_filters.unmatched_unicast | ||
4264 | ); | 4319 | ); |
4265 | 4320 | ||
4266 | storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); | 4321 | storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); |
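Read together, the rx-mode hunk above splits the filtering policy per client: the default L2 client takes the accumulated def_q_filters, while the FCoE L2 client (when present) always gets a fixed unicast+multicast filter except in the NONE case. A condensed summary derived from the hunk (the BNX2X_ACCEPT_* values are OR-ed flag bits):

        /*
         *  rx_mode    default client (BP_L_ID)              FCoE L2 client
         *  NONE       ACCEPT_NONE                           ACCEPT_NONE
         *  NORMAL     UNICAST | BROADCAST | MULTICAST       UNICAST | MULTICAST
         *  ALLMULTI   UNICAST | BROADCAST | ALL_MULTICAST   UNICAST | MULTICAST
         *  PROMISC    PROMISCUOUS_MODE                      UNICAST | MULTICAST
         */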
@@ -4369,9 +4424,11 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) | |||
4369 | { | 4424 | { |
4370 | int i; | 4425 | int i; |
4371 | 4426 | ||
4372 | for_each_queue(bp, i) | 4427 | for_each_eth_queue(bp, i) |
4373 | bnx2x_init_fp_sb(bp, i); | 4428 | bnx2x_init_fp_sb(bp, i); |
4374 | #ifdef BCM_CNIC | 4429 | #ifdef BCM_CNIC |
4430 | if (!NO_FCOE(bp)) | ||
4431 | bnx2x_init_fcoe_fp(bp); | ||
4375 | 4432 | ||
4376 | bnx2x_init_sb(bp, bp->cnic_sb_mapping, | 4433 | bnx2x_init_sb(bp, bp->cnic_sb_mapping, |
4377 | BNX2X_VF_ID_INVALID, false, | 4434 | BNX2X_VF_ID_INVALID, false, |
@@ -5877,6 +5934,15 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
5877 | /* fastpath */ | 5934 | /* fastpath */ |
5878 | /* Common */ | 5935 | /* Common */ |
5879 | for_each_queue(bp, i) { | 5936 | for_each_queue(bp, i) { |
5937 | #ifdef BCM_CNIC | ||
5938 | /* FCoE client uses default status block */ | ||
5939 | if (IS_FCOE_IDX(i)) { | ||
5940 | union host_hc_status_block *sb = | ||
5941 | &bnx2x_fp(bp, i, status_blk); | ||
5942 | memset(sb, 0, sizeof(union host_hc_status_block)); | ||
5943 | bnx2x_fp(bp, i, status_blk_mapping) = 0; | ||
5944 | } else { | ||
5945 | #endif | ||
5880 | /* status blocks */ | 5946 | /* status blocks */ |
5881 | if (CHIP_IS_E2(bp)) | 5947 | if (CHIP_IS_E2(bp)) |
5882 | BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb), | 5948 | BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb), |
@@ -5886,9 +5952,12 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
5886 | BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb), | 5952 | BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb), |
5887 | bnx2x_fp(bp, i, status_blk_mapping), | 5953 | bnx2x_fp(bp, i, status_blk_mapping), |
5888 | sizeof(struct host_hc_status_block_e1x)); | 5954 | sizeof(struct host_hc_status_block_e1x)); |
5955 | #ifdef BCM_CNIC | ||
5956 | } | ||
5957 | #endif | ||
5889 | } | 5958 | } |
5890 | /* Rx */ | 5959 | /* Rx */ |
5891 | for_each_queue(bp, i) { | 5960 | for_each_rx_queue(bp, i) { |
5892 | 5961 | ||
5893 | /* fastpath rx rings: rx_buf rx_desc rx_comp */ | 5962 | /* fastpath rx rings: rx_buf rx_desc rx_comp */ |
5894 | BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring)); | 5963 | BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring)); |
@@ -5908,7 +5977,7 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
5908 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); | 5977 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); |
5909 | } | 5978 | } |
5910 | /* Tx */ | 5979 | /* Tx */ |
5911 | for_each_queue(bp, i) { | 5980 | for_each_tx_queue(bp, i) { |
5912 | 5981 | ||
5913 | /* fastpath tx rings: tx_buf tx_desc */ | 5982 | /* fastpath tx rings: tx_buf tx_desc */ |
5914 | BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); | 5983 | BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); |
@@ -5992,15 +6061,20 @@ int bnx2x_alloc_mem(struct bnx2x *bp) | |||
5992 | union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk); | 6061 | union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk); |
5993 | bnx2x_fp(bp, i, bp) = bp; | 6062 | bnx2x_fp(bp, i, bp) = bp; |
5994 | /* status blocks */ | 6063 | /* status blocks */ |
5995 | if (CHIP_IS_E2(bp)) | 6064 | #ifdef BCM_CNIC |
5996 | BNX2X_PCI_ALLOC(sb->e2_sb, | 6065 | if (!IS_FCOE_IDX(i)) { |
5997 | &bnx2x_fp(bp, i, status_blk_mapping), | 6066 | #endif |
5998 | sizeof(struct host_hc_status_block_e2)); | 6067 | if (CHIP_IS_E2(bp)) |
5999 | else | 6068 | BNX2X_PCI_ALLOC(sb->e2_sb, |
6000 | BNX2X_PCI_ALLOC(sb->e1x_sb, | 6069 | &bnx2x_fp(bp, i, status_blk_mapping), |
6001 | &bnx2x_fp(bp, i, status_blk_mapping), | 6070 | sizeof(struct host_hc_status_block_e2)); |
6002 | sizeof(struct host_hc_status_block_e1x)); | 6071 | else |
6003 | 6072 | BNX2X_PCI_ALLOC(sb->e1x_sb, | |
6073 | &bnx2x_fp(bp, i, status_blk_mapping), | ||
6074 | sizeof(struct host_hc_status_block_e1x)); | ||
6075 | #ifdef BCM_CNIC | ||
6076 | } | ||
6077 | #endif | ||
6004 | set_sb_shortcuts(bp, i); | 6078 | set_sb_shortcuts(bp, i); |
6005 | } | 6079 | } |
6006 | /* Rx */ | 6080 | /* Rx */ |
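The free_mem/alloc_mem hunks implement one rule: every ETH ring owns its own host status block, while the FCoE ring is skipped because it is serviced from the default (slow-path) status block and only needs its shortcut pointers set. A condensed sketch of that rule, with alloc_fp_status_block() standing in as a hypothetical helper for the BNX2X_PCI_ALLOC calls shown above:

        /* sketch only -- alloc_fp_status_block() is a hypothetical stand-in */
        for_each_queue(bp, i) {
                bnx2x_fp(bp, i, bp) = bp;
                if (!IS_FCOE_IDX(i))
                        alloc_fp_status_block(bp, i);  /* e2 or e1x SB, per chip */
                set_sb_shortcuts(bp, i);
        }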
@@ -6410,7 +6484,8 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) | |||
6410 | { | 6484 | { |
6411 | u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) : | 6485 | u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) : |
6412 | bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE)); | 6486 | bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE)); |
6413 | u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID; | 6487 | u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID + |
6488 | BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; | ||
6414 | u32 cl_bit_vec = (1 << iscsi_l2_cl_id); | 6489 | u32 cl_bit_vec = (1 << iscsi_l2_cl_id); |
6415 | 6490 | ||
6416 | /* Send a SET_MAC ramrod */ | 6491 | /* Send a SET_MAC ramrod */ |
@@ -6418,6 +6493,50 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) | |||
6418 | cam_offset, 0); | 6493 | cam_offset, 0); |
6419 | 6494 | ||
6420 | bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE); | 6495 | bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE); |
6496 | |||
6497 | return 0; | ||
6498 | } | ||
6499 | |||
6500 | /** | ||
6501 | * Set FCoE L2 MAC(s) at the next entries in the CAM after the | ||
6502 | * ETH MAC(s). This function will wait until the ramrod | ||
6503 | * completion returns. | ||
6504 | * | ||
6505 | * @param bp driver handle | ||
6506 | * @param set set or clear the CAM entry | ||
6507 | * | ||
6508 | * @return 0 on success, -ENODEV if the ramrod doesn't return. | ||
6509 | */ | ||
6510 | int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set) | ||
6511 | { | ||
6512 | u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id)); | ||
6513 | /** | ||
6514 | * CAM allocation for E1H | ||
6515 | * eth unicasts: by func number | ||
6516 | * iscsi: by func number | ||
6517 | * fip unicast: by func number | ||
6518 | * fip multicast: by func number | ||
6519 | */ | ||
6520 | bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac, | ||
6521 | cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0); | ||
6522 | |||
6523 | return 0; | ||
6524 | } | ||
6525 | |||
6526 | int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set) | ||
6527 | { | ||
6528 | u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id)); | ||
6529 | |||
6530 | /** | ||
6531 | * CAM allocation for E1H | ||
6532 | * eth unicasts: by func number | ||
6533 | * iscsi: by func number | ||
6534 | * fip unicast: by func number | ||
6535 | * fip multicast: by func number | ||
6536 | */ | ||
6537 | bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec, | ||
6538 | bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0); | ||
6539 | |||
6421 | return 0; | 6540 | return 0; |
6422 | } | 6541 | } |
6423 | #endif | 6542 | #endif |
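bnx2x_set_all_enode_macs() above programs the ALL_ENODE_MACS address, whose definition is not part of this patch. FC-BB-5 defines the All-ENode-MACs group address as 01:10:18:01:00:01, so a plausible definition (an assumption, not necessarily the driver's literal one) would be:

        /* assumed value, per the FIP well-known group addresses in FC-BB-5 */
        static const u8 ALL_ENODE_MACS[ETH_ALEN] = {
                0x01, 0x10, 0x18, 0x01, 0x00, 0x01
        };

Clearing is symmetric: bnx2x_del_fcoe_eth_macs() later in this patch calls both helpers with set == 0 to drop the FIP unicast and the ENode multicast entries from the CAM.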
@@ -6435,6 +6554,8 @@ static void bnx2x_fill_cl_init_data(struct bnx2x *bp, | |||
6435 | data->general.statistics_counter_id = params->rxq_params.stat_id; | 6554 | data->general.statistics_counter_id = params->rxq_params.stat_id; |
6436 | data->general.statistics_en_flg = | 6555 | data->general.statistics_en_flg = |
6437 | (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0; | 6556 | (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0; |
6557 | data->general.is_fcoe_flg = | ||
6558 | (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0; | ||
6438 | data->general.activate_flg = activate; | 6559 | data->general.activate_flg = activate; |
6439 | data->general.sp_client_id = params->rxq_params.spcl_id; | 6560 | data->general.sp_client_id = params->rxq_params.spcl_id; |
6440 | 6561 | ||
@@ -6503,7 +6624,9 @@ static void bnx2x_fill_cl_init_data(struct bnx2x *bp, | |||
6503 | data->fc.safc_group_num = params->txq_params.cos; | 6624 | data->fc.safc_group_num = params->txq_params.cos; |
6504 | data->fc.safc_group_en_flg = | 6625 | data->fc.safc_group_en_flg = |
6505 | (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0; | 6626 | (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0; |
6506 | data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW; | 6627 | data->fc.traffic_type = |
6628 | (params->ramrod_params.flags & CLIENT_IS_FCOE) ? | ||
6629 | LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; | ||
6507 | } | 6630 | } |
6508 | 6631 | ||
6509 | static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid) | 6632 | static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid) |
@@ -6602,7 +6725,7 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp) | |||
6602 | bnx2x_enable_msi(bp); | 6725 | bnx2x_enable_msi(bp); |
6603 | /* falling through... */ | 6726 | /* falling through... */ |
6604 | case INT_MODE_INTx: | 6727 | case INT_MODE_INTx: |
6605 | bp->num_queues = 1; | 6728 | bp->num_queues = 1 + NONE_ETH_CONTEXT_USE; |
6606 | DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); | 6729 | DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); |
6607 | break; | 6730 | break; |
6608 | default: | 6731 | default: |
@@ -6625,8 +6748,8 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp) | |||
6625 | "enable MSI-X (%d), " | 6748 | "enable MSI-X (%d), " |
6626 | "set number of queues to %d\n", | 6749 | "set number of queues to %d\n", |
6627 | bp->num_queues, | 6750 | bp->num_queues, |
6628 | 1); | 6751 | 1 + NONE_ETH_CONTEXT_USE); |
6629 | bp->num_queues = 1; | 6752 | bp->num_queues = 1 + NONE_ETH_CONTEXT_USE; |
6630 | 6753 | ||
6631 | if (!(bp->flags & DISABLE_MSI_FLAG)) | 6754 | if (!(bp->flags & DISABLE_MSI_FLAG)) |
6632 | bnx2x_enable_msi(bp); | 6755 | bnx2x_enable_msi(bp); |
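A short worked example of the queue budgeting in bnx2x_set_int_mode(), assuming NONE_ETH_CONTEXT_USE evaluates to 1 (one extra context reserved for the FCoE L2 client):

        /* assumed: NONE_ETH_CONTEXT_USE == 1                                   */
        /* INTx / MSI:              bp->num_queues = 1 + 1 = 2  (1 ETH + 1 FCoE) */
        /* MSI-X fallback case:     bp->num_queues = 1 + 1 = 2                   */
        /* ETH queues seen by RSS:  bp->num_queues - NONE_ETH_CONTEXT_USE = 1    */

The same budgeting shows up again in bnx2x_init_one(), where cid_count grows by NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE before alloc_etherdev_mq() is called.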
@@ -6747,7 +6870,9 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
6747 | struct bnx2x_client_init_params params = { {0} }; | 6870 | struct bnx2x_client_init_params params = { {0} }; |
6748 | int rc; | 6871 | int rc; |
6749 | 6872 | ||
6750 | bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, | 6873 | /* reset IGU state; skip the FCoE L2 queue */ |
6874 | if (!IS_FCOE_FP(fp)) | ||
6875 | bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, | ||
6751 | IGU_INT_ENABLE, 0); | 6876 | IGU_INT_ENABLE, 0); |
6752 | 6877 | ||
6753 | params.ramrod_params.pstate = &fp->state; | 6878 | params.ramrod_params.pstate = &fp->state; |
@@ -6755,6 +6880,12 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
6755 | params.ramrod_params.index = fp->index; | 6880 | params.ramrod_params.index = fp->index; |
6756 | params.ramrod_params.cid = fp->cid; | 6881 | params.ramrod_params.cid = fp->cid; |
6757 | 6882 | ||
6883 | #ifdef BCM_CNIC | ||
6884 | if (IS_FCOE_FP(fp)) | ||
6885 | params.ramrod_params.flags |= CLIENT_IS_FCOE; | ||
6886 | |||
6887 | #endif | ||
6888 | |||
6758 | if (is_leading) | 6889 | if (is_leading) |
6759 | params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS; | 6890 | params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS; |
6760 | 6891 | ||
@@ -6839,7 +6970,7 @@ static void bnx2x_reset_func(struct bnx2x *bp) | |||
6839 | REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); | 6970 | REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); |
6840 | 6971 | ||
6841 | /* FP SBs */ | 6972 | /* FP SBs */ |
6842 | for_each_queue(bp, i) { | 6973 | for_each_eth_queue(bp, i) { |
6843 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 6974 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
6844 | REG_WR8(bp, | 6975 | REG_WR8(bp, |
6845 | BAR_CSTRORM_INTMEM + | 6976 | BAR_CSTRORM_INTMEM + |
@@ -6959,6 +7090,20 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) | |||
6959 | } | 7090 | } |
6960 | } | 7091 | } |
6961 | 7092 | ||
7093 | #ifdef BCM_CNIC | ||
7094 | static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp) | ||
7095 | { | ||
7096 | if (bp->flags & FCOE_MACS_SET) { | ||
7097 | if (!IS_MF_SD(bp)) | ||
7098 | bnx2x_set_fip_eth_mac_addr(bp, 0); | ||
7099 | |||
7100 | bnx2x_set_all_enode_macs(bp, 0); | ||
7101 | |||
7102 | bp->flags &= ~FCOE_MACS_SET; | ||
7103 | } | ||
7104 | } | ||
7105 | #endif | ||
7106 | |||
6962 | void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) | 7107 | void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) |
6963 | { | 7108 | { |
6964 | int port = BP_PORT(bp); | 7109 | int port = BP_PORT(bp); |
@@ -6966,7 +7111,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) | |||
6966 | int i, cnt, rc; | 7111 | int i, cnt, rc; |
6967 | 7112 | ||
6968 | /* Wait until tx fastpath tasks complete */ | 7113 | /* Wait until tx fastpath tasks complete */ |
6969 | for_each_queue(bp, i) { | 7114 | for_each_tx_queue(bp, i) { |
6970 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 7115 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
6971 | 7116 | ||
6972 | cnt = 1000; | 7117 | cnt = 1000; |
@@ -7006,13 +7151,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) | |||
7006 | } | 7151 | } |
7007 | 7152 | ||
7008 | #ifdef BCM_CNIC | 7153 | #ifdef BCM_CNIC |
7009 | /* Clear iSCSI L2 MAC */ | 7154 | bnx2x_del_fcoe_eth_macs(bp); |
7010 | mutex_lock(&bp->cnic_mutex); | ||
7011 | if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) { | ||
7012 | bnx2x_set_iscsi_eth_mac_addr(bp, 0); | ||
7013 | bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET; | ||
7014 | } | ||
7015 | mutex_unlock(&bp->cnic_mutex); | ||
7016 | #endif | 7155 | #endif |
7017 | 7156 | ||
7018 | if (unload_mode == UNLOAD_NORMAL) | 7157 | if (unload_mode == UNLOAD_NORMAL) |
@@ -7865,7 +8004,7 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) | |||
7865 | bp->igu_sb_cnt = 0; | 8004 | bp->igu_sb_cnt = 0; |
7866 | if (CHIP_INT_MODE_IS_BC(bp)) { | 8005 | if (CHIP_INT_MODE_IS_BC(bp)) { |
7867 | bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, | 8006 | bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, |
7868 | bp->l2_cid_count); | 8007 | NUM_IGU_SB_REQUIRED(bp->l2_cid_count)); |
7869 | 8008 | ||
7870 | bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * | 8009 | bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * |
7871 | FP_SB_MAX_E1x; | 8010 | FP_SB_MAX_E1x; |
@@ -7896,7 +8035,8 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) | |||
7896 | } | 8035 | } |
7897 | } | 8036 | } |
7898 | } | 8037 | } |
7899 | bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count); | 8038 | bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, |
8039 | NUM_IGU_SB_REQUIRED(bp->l2_cid_count)); | ||
7900 | if (bp->igu_sb_cnt == 0) | 8040 | if (bp->igu_sb_cnt == 0) |
7901 | BNX2X_ERR("CAM configuration error\n"); | 8041 | BNX2X_ERR("CAM configuration error\n"); |
7902 | } | 8042 | } |
@@ -8312,6 +8452,17 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) | |||
8312 | memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); | 8452 | memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); |
8313 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); | 8453 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); |
8314 | 8454 | ||
8455 | #ifdef BCM_CNIC | ||
8456 | /* Inform the upper layers about FCoE MAC */ | ||
8457 | if (!CHIP_IS_E1x(bp)) { | ||
8458 | if (IS_MF_SD(bp)) | ||
8459 | memcpy(bp->fip_mac, bp->dev->dev_addr, | ||
8460 | sizeof(bp->fip_mac)); | ||
8461 | else | ||
8462 | memcpy(bp->fip_mac, bp->iscsi_mac, | ||
8463 | sizeof(bp->fip_mac)); | ||
8464 | } | ||
8465 | #endif | ||
8315 | } | 8466 | } |
8316 | 8467 | ||
8317 | static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | 8468 | static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) |
@@ -8328,7 +8479,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
8328 | 8479 | ||
8329 | bp->igu_dsb_id = DEF_SB_IGU_ID; | 8480 | bp->igu_dsb_id = DEF_SB_IGU_ID; |
8330 | bp->igu_base_sb = 0; | 8481 | bp->igu_base_sb = 0; |
8331 | bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count); | 8482 | bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, |
8483 | NUM_IGU_SB_REQUIRED(bp->l2_cid_count)); | ||
8332 | } else { | 8484 | } else { |
8333 | bp->common.int_block = INT_BLOCK_IGU; | 8485 | bp->common.int_block = INT_BLOCK_IGU; |
8334 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); | 8486 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); |
@@ -9263,7 +9415,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
9263 | return -ENODEV; | 9415 | return -ENODEV; |
9264 | } | 9416 | } |
9265 | 9417 | ||
9266 | cid_count += CNIC_CONTEXT_USE; | 9418 | cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE; |
9267 | 9419 | ||
9268 | /* dev zeroed in init_etherdev */ | 9420 | /* dev zeroed in init_etherdev */ |
9269 | dev = alloc_etherdev_mq(sizeof(*bp), cid_count); | 9421 | dev = alloc_etherdev_mq(sizeof(*bp), cid_count); |
@@ -9292,6 +9444,13 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
9292 | /* calc qm_cid_count */ | 9444 | /* calc qm_cid_count */ |
9293 | bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count); | 9445 | bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count); |
9294 | 9446 | ||
9447 | #ifdef BCM_CNIC | ||
9449 | /* disable FCoE L2 queue for E1x */ | ||
9449 | if (CHIP_IS_E1x(bp)) | ||
9450 | bp->flags |= NO_FCOE_FLAG; | ||
9451 | |||
9452 | #endif | ||
9453 | |||
9295 | /* Configure interrupt mode: try to enable MSI-X/MSI if | 9454 | /* Configure interrupt mode: try to enable MSI-X/MSI if |
9296 | * needed, set bp->num_queues appropriately. | 9455 | * needed, set bp->num_queues appropriately. |
9297 | */ | 9456 | */ |
@@ -9306,6 +9465,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
9306 | goto init_one_exit; | 9465 | goto init_one_exit; |
9307 | } | 9466 | } |
9308 | 9467 | ||
9468 | #ifdef BCM_CNIC | ||
9469 | if (!NO_FCOE(bp)) { | ||
9470 | /* Add storage MAC address */ | ||
9471 | rtnl_lock(); | ||
9472 | dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); | ||
9473 | rtnl_unlock(); | ||
9474 | } | ||
9475 | #endif | ||
9476 | |||
9309 | bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); | 9477 | bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); |
9310 | 9478 | ||
9311 | netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx," | 9479 | netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx," |
@@ -9349,6 +9517,15 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | |||
9349 | } | 9517 | } |
9350 | bp = netdev_priv(dev); | 9518 | bp = netdev_priv(dev); |
9351 | 9519 | ||
9520 | #ifdef BCM_CNIC | ||
9521 | /* Delete storage MAC address */ | ||
9522 | if (!NO_FCOE(bp)) { | ||
9523 | rtnl_lock(); | ||
9524 | dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); | ||
9525 | rtnl_unlock(); | ||
9526 | } | ||
9527 | #endif | ||
9528 | |||
9352 | unregister_netdev(dev); | 9529 | unregister_netdev(dev); |
9353 | 9530 | ||
9354 | /* Delete all NAPI objects */ | 9531 | /* Delete all NAPI objects */ |
@@ -9398,7 +9575,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) | |||
9398 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 9575 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
9399 | bnx2x_free_skbs(bp); | 9576 | bnx2x_free_skbs(bp); |
9400 | 9577 | ||
9401 | for_each_queue(bp, i) | 9578 | for_each_rx_queue(bp, i) |
9402 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | 9579 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); |
9403 | 9580 | ||
9404 | bnx2x_free_mem(bp); | 9581 | bnx2x_free_mem(bp); |
@@ -9625,7 +9802,8 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) | |||
9625 | break; | 9802 | break; |
9626 | else | 9803 | else |
9627 | atomic_dec(&bp->spq_left); | 9804 | atomic_dec(&bp->spq_left); |
9628 | } else if (type == ISCSI_CONNECTION_TYPE) { | 9805 | } else if ((type == ISCSI_CONNECTION_TYPE) || |
9806 | (type == FCOE_CONNECTION_TYPE)) { | ||
9629 | if (bp->cnic_spq_pending >= | 9807 | if (bp->cnic_spq_pending >= |
9630 | bp->cnic_eth_dev.max_kwqe_pending) | 9808 | bp->cnic_eth_dev.max_kwqe_pending) |
9631 | break; | 9809 | break; |
@@ -9772,6 +9950,9 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) | |||
9772 | case DRV_CTL_START_L2_CMD: { | 9950 | case DRV_CTL_START_L2_CMD: { |
9773 | u32 cli = ctl->data.ring.client_id; | 9951 | u32 cli = ctl->data.ring.client_id; |
9774 | 9952 | ||
9953 | /* Clear the FCoE FIP and ALL ENODE MAC addresses first */ | ||
9954 | bnx2x_del_fcoe_eth_macs(bp); | ||
9955 | |||
9775 | /* Set iSCSI MAC address */ | 9956 | /* Set iSCSI MAC address */ |
9776 | bnx2x_set_iscsi_eth_mac_addr(bp, 1); | 9957 | bnx2x_set_iscsi_eth_mac_addr(bp, 1); |
9777 | 9958 | ||
@@ -9893,10 +10074,6 @@ static int bnx2x_unregister_cnic(struct net_device *dev) | |||
9893 | struct cnic_eth_dev *cp = &bp->cnic_eth_dev; | 10074 | struct cnic_eth_dev *cp = &bp->cnic_eth_dev; |
9894 | 10075 | ||
9895 | mutex_lock(&bp->cnic_mutex); | 10076 | mutex_lock(&bp->cnic_mutex); |
9896 | if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) { | ||
9897 | bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET; | ||
9898 | bnx2x_set_iscsi_eth_mac_addr(bp, 0); | ||
9899 | } | ||
9900 | cp->drv_state = 0; | 10077 | cp->drv_state = 0; |
9901 | rcu_assign_pointer(bp->cnic_ops, NULL); | 10078 | rcu_assign_pointer(bp->cnic_ops, NULL); |
9902 | mutex_unlock(&bp->cnic_mutex); | 10079 | mutex_unlock(&bp->cnic_mutex); |
@@ -9927,7 +10104,9 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) | |||
9927 | cp->drv_ctl = bnx2x_drv_ctl; | 10104 | cp->drv_ctl = bnx2x_drv_ctl; |
9928 | cp->drv_register_cnic = bnx2x_register_cnic; | 10105 | cp->drv_register_cnic = bnx2x_register_cnic; |
9929 | cp->drv_unregister_cnic = bnx2x_unregister_cnic; | 10106 | cp->drv_unregister_cnic = bnx2x_unregister_cnic; |
9930 | cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID; | 10107 | cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; |
10108 | cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID + | ||
10109 | BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; | ||
9931 | cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; | 10110 | cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; |
9932 | 10111 | ||
9933 | DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, " | 10112 | DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, " |
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index 4733c835dad9..6e4d9b144cc4 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c | |||
@@ -160,7 +160,7 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) | |||
160 | 160 | ||
161 | ramrod_data.drv_counter = bp->stats_counter++; | 161 | ramrod_data.drv_counter = bp->stats_counter++; |
162 | ramrod_data.collect_port = bp->port.pmf ? 1 : 0; | 162 | ramrod_data.collect_port = bp->port.pmf ? 1 : 0; |
163 | for_each_queue(bp, i) | 163 | for_each_eth_queue(bp, i) |
164 | ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); | 164 | ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); |
165 | 165 | ||
166 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, | 166 | rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, |
@@ -766,7 +766,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) | |||
766 | estats->no_buff_discard_hi = 0; | 766 | estats->no_buff_discard_hi = 0; |
767 | estats->no_buff_discard_lo = 0; | 767 | estats->no_buff_discard_lo = 0; |
768 | 768 | ||
769 | for_each_queue(bp, i) { | 769 | for_each_eth_queue(bp, i) { |
770 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 770 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
771 | int cl_id = fp->cl_id; | 771 | int cl_id = fp->cl_id; |
772 | struct tstorm_per_client_stats *tclient = | 772 | struct tstorm_per_client_stats *tclient = |
@@ -996,7 +996,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp) | |||
996 | nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); | 996 | nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); |
997 | 997 | ||
998 | tmp = estats->mac_discard; | 998 | tmp = estats->mac_discard; |
999 | for_each_queue(bp, i) | 999 | for_each_rx_queue(bp, i) |
1000 | tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); | 1000 | tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); |
1001 | nstats->rx_dropped = tmp; | 1001 | nstats->rx_dropped = tmp; |
1002 | 1002 | ||
@@ -1087,7 +1087,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1087 | bp->dev->name, | 1087 | bp->dev->name, |
1088 | estats->brb_drop_lo, estats->brb_truncate_lo); | 1088 | estats->brb_drop_lo, estats->brb_truncate_lo); |
1089 | 1089 | ||
1090 | for_each_queue(bp, i) { | 1090 | for_each_eth_queue(bp, i) { |
1091 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 1091 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
1092 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; | 1092 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; |
1093 | 1093 | ||
@@ -1101,7 +1101,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1101 | fp->rx_calls, fp->rx_pkt); | 1101 | fp->rx_calls, fp->rx_pkt); |
1102 | } | 1102 | } |
1103 | 1103 | ||
1104 | for_each_queue(bp, i) { | 1104 | for_each_eth_queue(bp, i) { |
1105 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 1105 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
1106 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; | 1106 | struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; |
1107 | struct netdev_queue *txq = | 1107 | struct netdev_queue *txq = |
@@ -1381,7 +1381,8 @@ void bnx2x_stats_init(struct bnx2x *bp) | |||
1381 | memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats)); | 1381 | memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats)); |
1382 | } | 1382 | } |
1383 | 1383 | ||
1384 | for_each_queue(bp, i) { | 1384 | /* FW stats are currently collected for ETH clients only */ |
1385 | for_each_eth_queue(bp, i) { | ||
1385 | /* Set initial stats counter in the stats ramrod data to -1 */ | 1386 | /* Set initial stats counter in the stats ramrod data to -1 */ |
1386 | int cl_id = bp->fp[i].cl_id; | 1387 | int cl_id = bp->fp[i].cl_id; |
1387 | 1388 | ||
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h index afd15efa429a..596798c47452 100644 --- a/drivers/net/bnx2x/bnx2x_stats.h +++ b/drivers/net/bnx2x/bnx2x_stats.h | |||
@@ -53,7 +53,6 @@ struct bnx2x_eth_q_stats { | |||
53 | u32 hw_csum_err; | 53 | u32 hw_csum_err; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | #define BNX2X_NUM_Q_STATS 13 | ||
57 | #define Q_STATS_OFFSET32(stat_name) \ | 56 | #define Q_STATS_OFFSET32(stat_name) \ |
58 | (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) | 57 | (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) |
59 | 58 | ||
@@ -225,7 +224,6 @@ struct bnx2x_eth_stats { | |||
225 | u32 nig_timer_max; | 224 | u32 nig_timer_max; |
226 | }; | 225 | }; |
227 | 226 | ||
228 | #define BNX2X_NUM_STATS 43 | ||
229 | #define STATS_OFFSET32(stat_name) \ | 227 | #define STATS_OFFSET32(stat_name) \ |
230 | (offsetof(struct bnx2x_eth_stats, stat_name) / 4) | 228 | (offsetof(struct bnx2x_eth_stats, stat_name) / 4) |
231 | 229 | ||