aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/bnx2x
diff options
context:
space:
mode:
authorAriel Elior <ariele@broadcom.com>2011-07-14 04:31:57 -0400
committerDavid S. Miller <davem@davemloft.net>2011-07-14 19:02:29 -0400
commit6383c0b35b48bfbd0fc8c6fe126a6603c5a9a4b3 (patch)
tree5180305d7ea17edfe1cdfe75d11fe4861a713227 /drivers/net/bnx2x
parent7be08a7222c345798b0697a89ea3dd2c7c83f47c (diff)
bnx2x: Multiple concurrent l2 traffic classes
Overview: Support mapping of priorities to traffic classes and traffic classes to transmission queues ranges in the net device. The queue ranges are (count, offset) pairs relating to the txq array. This can be done via DCBX negotiation or by kernel. As a result Enhanced Transmission Selection (ETS) and Priority Flow Control (PFC) are supported between L2 network traffic classes. Mapping: This patch uses the netdev_set_num_tc, netdev_set_prio_tc_map and netdev_set_tc_queue functions to map priorities to traffic classes and traffic classes to transmission queue ranges. This mapping is performed by bnx2x_setup_tc function which is connected to the ndo_setup_tc. This function is always called at nic load where by default it maps all priorities to tc 0, and it may also be called by the kernel or by the bnx2x upon DCBX negotiation to modify the mapping. rtnl lock: When the ndo_setup_tc is called at nic load or by kernel the rtnl lock is already taken. However, when DCBX negotiation takes place the lock is not taken. The work is therefore scheduled to be handled by the sp_rtnl task. Fastpath: The fastpath structure of the bnx2x which was previously used to hold the information of one tx queue and one rx queue was redesigned to represent multiple tx queues, one for each traffic class. The transmission queue supplied in the skb by the kernel can no longer be interpreted as a straightforward index into the fastpath structure array, but it must rather be decoded to the appropriate fastpath index and the tc within that fastpath. Slowpath: The bnx2x's queue object was redesigned to accommodate multiple transmission queues. The queue object's state machine was enhanced to allow opening multiple transmission-only connections on top of the regular tx-rx connection. Firmware: This feature relies on the tx-only queue feature introduced in the bnx2x 7.0.23 firmware and the FW likewise must have the bnx2x multi cos support. 
Signed-off-by: Ariel Elior <ariele@broadcom.com> Signed-off-by: Eilon Greenstein <eilong@broadcom.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bnx2x')
-rw-r--r--drivers/net/bnx2x/bnx2x.h181
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c370
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h109
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c25
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c27
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h19
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c521
-rw-r--r--drivers/net/bnx2x/bnx2x_sp.c476
-rw-r--r--drivers/net/bnx2x/bnx2x_sp.h70
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c40
11 files changed, 1316 insertions, 524 deletions
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 16dc2c9df8b7..53fa8ea983e9 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -120,6 +120,7 @@ do { \
120 120
121 121
122#ifdef BNX2X_STOP_ON_ERROR 122#ifdef BNX2X_STOP_ON_ERROR
123void bnx2x_int_disable(struct bnx2x *bp);
123#define bnx2x_panic() do { \ 124#define bnx2x_panic() do { \
124 bp->panic = 1; \ 125 bp->panic = 1; \
125 BNX2X_ERR("driver assert\n"); \ 126 BNX2X_ERR("driver assert\n"); \
@@ -240,21 +241,21 @@ do { \
240 */ 241 */
241/* iSCSI L2 */ 242/* iSCSI L2 */
242#define BNX2X_ISCSI_ETH_CL_ID_IDX 1 243#define BNX2X_ISCSI_ETH_CL_ID_IDX 1
243#define BNX2X_ISCSI_ETH_CID 17 244#define BNX2X_ISCSI_ETH_CID 49
244 245
245/* FCoE L2 */ 246/* FCoE L2 */
246#define BNX2X_FCOE_ETH_CL_ID_IDX 2 247#define BNX2X_FCOE_ETH_CL_ID_IDX 2
247#define BNX2X_FCOE_ETH_CID 18 248#define BNX2X_FCOE_ETH_CID 50
248 249
249/** Additional rings budgeting */ 250/** Additional rings budgeting */
250#ifdef BCM_CNIC 251#ifdef BCM_CNIC
251#define CNIC_CONTEXT_USE 1 252#define CNIC_PRESENT 1
252#define FCOE_CONTEXT_USE 1 253#define FCOE_PRESENT 1
253#else 254#else
254#define CNIC_CONTEXT_USE 0 255#define CNIC_PRESENT 0
255#define FCOE_CONTEXT_USE 0 256#define FCOE_PRESENT 0
256#endif /* BCM_CNIC */ 257#endif /* BCM_CNIC */
257#define NONE_ETH_CONTEXT_USE (FCOE_CONTEXT_USE) 258#define NON_ETH_CONTEXT_USE (FCOE_PRESENT)
258 259
259#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ 260#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
260 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR 261 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@ -262,8 +263,35 @@ do { \
262#define SM_RX_ID 0 263#define SM_RX_ID 0
263#define SM_TX_ID 1 264#define SM_TX_ID 1
264 265
265/* fast path */ 266/* defines for multiple tx priority indices */
267#define FIRST_TX_ONLY_COS_INDEX 1
268#define FIRST_TX_COS_INDEX 0
269
270/* defines for decoding the fastpath index and the cos index out of the
271 * transmission queue index
272 */
273#define MAX_TXQS_PER_COS FP_SB_MAX_E1x
274
275#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS)
276#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
277
278/* rules for calculating the cids of tx-only connections */
279#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS)
280#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS)
281
282/* fp index inside class of service range */
283#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS)
284
285/*
286 * 0..15 eth cos0
287 * 16..31 eth cos1 if applicable
288 * 32..47 eth cos2 If applicable
289 * fcoe queue follows eth queues (16, 32, 48 depending on cos)
290 */
291#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos)
292#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
266 293
294/* fast path */
267struct sw_rx_bd { 295struct sw_rx_bd {
268 struct sk_buff *skb; 296 struct sk_buff *skb;
269 DEFINE_DMA_UNMAP_ADDR(mapping); 297 DEFINE_DMA_UNMAP_ADDR(mapping);
@@ -388,6 +416,29 @@ struct bnx2x_agg_info {
388#define Q_STATS_OFFSET32(stat_name) \ 416#define Q_STATS_OFFSET32(stat_name) \
389 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) 417 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
390 418
419struct bnx2x_fp_txdata {
420
421 struct sw_tx_bd *tx_buf_ring;
422
423 union eth_tx_bd_types *tx_desc_ring;
424 dma_addr_t tx_desc_mapping;
425
426 u32 cid;
427
428 union db_prod tx_db;
429
430 u16 tx_pkt_prod;
431 u16 tx_pkt_cons;
432 u16 tx_bd_prod;
433 u16 tx_bd_cons;
434
435 unsigned long tx_pkt;
436
437 __le16 *tx_cons_sb;
438
439 int txq_index;
440};
441
391struct bnx2x_fastpath { 442struct bnx2x_fastpath {
392 struct bnx2x *bp; /* parent */ 443 struct bnx2x *bp; /* parent */
393 444
@@ -404,10 +455,8 @@ struct bnx2x_fastpath {
404 455
405 dma_addr_t status_blk_mapping; 456 dma_addr_t status_blk_mapping;
406 457
407 struct sw_tx_bd *tx_buf_ring; 458 u8 max_cos; /* actual number of active tx coses */
408 459 struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];
409 union eth_tx_bd_types *tx_desc_ring;
410 dma_addr_t tx_desc_mapping;
411 460
412 struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */ 461 struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
413 struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */ 462 struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
@@ -426,20 +475,13 @@ struct bnx2x_fastpath {
426 475
427 u32 cid; 476 u32 cid;
428 477
478 __le16 fp_hc_idx;
479
429 u8 index; /* number in fp array */ 480 u8 index; /* number in fp array */
430 u8 cl_id; /* eth client id */ 481 u8 cl_id; /* eth client id */
431 u8 cl_qzone_id; 482 u8 cl_qzone_id;
432 u8 fw_sb_id; /* status block number in FW */ 483 u8 fw_sb_id; /* status block number in FW */
433 u8 igu_sb_id; /* status block number in HW */ 484 u8 igu_sb_id; /* status block number in HW */
434 union db_prod tx_db;
435
436 u16 tx_pkt_prod;
437 u16 tx_pkt_cons;
438 u16 tx_bd_prod;
439 u16 tx_bd_cons;
440 __le16 *tx_cons_sb;
441
442 __le16 fp_hc_idx;
443 485
444 u16 rx_bd_prod; 486 u16 rx_bd_prod;
445 u16 rx_bd_cons; 487 u16 rx_bd_cons;
@@ -449,8 +491,7 @@ struct bnx2x_fastpath {
449 /* The last maximal completed SGE */ 491 /* The last maximal completed SGE */
450 u16 last_max_sge; 492 u16 last_max_sge;
451 __le16 *rx_cons_sb; 493 __le16 *rx_cons_sb;
452 unsigned long tx_pkt, 494 unsigned long rx_pkt,
453 rx_pkt,
454 rx_calls; 495 rx_calls;
455 496
456 /* TPA related */ 497 /* TPA related */
@@ -489,8 +530,12 @@ struct bnx2x_fastpath {
489#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) 530#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
490#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) 531#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
491#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) 532#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
533#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
534 txdata[FIRST_TX_COS_INDEX].var)
492 535
493 536
537#define IS_ETH_FP(fp) (fp->index < \
538 BNX2X_NUM_ETH_QUEUES(fp->bp))
494#ifdef BCM_CNIC 539#ifdef BCM_CNIC
495#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) 540#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
496#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) 541#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
@@ -649,18 +694,23 @@ struct bnx2x_fastpath {
649 694
650#define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */ 695#define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */
651 /* (HC_INDEX_C_TOE_TX_CQ_CONS) */ 696 /* (HC_INDEX_C_TOE_TX_CQ_CONS) */
652#define HC_INDEX_ETH_TX_CQ_CONS 5 /* Formerly Cstorm ETH CQ index */ 697#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */
698 /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
699#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */
700 /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
701#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */
653 /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ 702 /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
654 703
655#define U_SB_ETH_RX_CQ_INDEX HC_INDEX_ETH_RX_CQ_CONS 704#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
656#define U_SB_ETH_RX_BD_INDEX HC_INDEX_ETH_RX_BD_CONS 705
657#define C_SB_ETH_TX_CQ_INDEX HC_INDEX_ETH_TX_CQ_CONS
658 706
659#define BNX2X_RX_SB_INDEX \ 707#define BNX2X_RX_SB_INDEX \
660 (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) 708 (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
661 709
662#define BNX2X_TX_SB_INDEX \ 710#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0
663 (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX]) 711
712#define BNX2X_TX_SB_INDEX_COS0 \
713 (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0])
664 714
665/* end of fast path */ 715/* end of fast path */
666 716
@@ -845,25 +895,6 @@ extern struct workqueue_struct *bnx2x_wq;
845/* fast-path interrupt contexts E2 */ 895/* fast-path interrupt contexts E2 */
846#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2 896#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2
847 897
848/*
849 * cid_cnt parameter below refers to the value returned by
850 * 'bnx2x_get_l2_cid_count()' routine
851 */
852
853/*
854 * The number of FP context allocated by the driver == max number of regular
855 * L2 queues + 1 for the FCoE L2 queue
856 */
857#define L2_FP_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE)
858
859/*
860 * The number of FP-SB allocated by the driver == max number of regular L2
861 * queues + 1 for the CNIC which also consumes an FP-SB
862 */
863#define FP_SB_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE)
864#define NUM_IGU_SB_REQUIRED(cid_cnt) \
865 (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE)
866
867union cdu_context { 898union cdu_context {
868 struct eth_context eth; 899 struct eth_context eth;
869 char pad[1024]; 900 char pad[1024];
@@ -871,7 +902,7 @@ union cdu_context {
871 902
872/* CDU host DB constants */ 903/* CDU host DB constants */
873#define CDU_ILT_PAGE_SZ_HW 3 904#define CDU_ILT_PAGE_SZ_HW 3
874#define CDU_ILT_PAGE_SZ (4096 << CDU_ILT_PAGE_SZ_HW) /* 32K */ 905#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
875#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) 906#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
876 907
877#ifdef BCM_CNIC 908#ifdef BCM_CNIC
@@ -1048,6 +1079,7 @@ struct bnx2x_fw_stats_data {
1048 1079
1049/* Public slow path states */ 1080/* Public slow path states */
1050enum { 1081enum {
1082 BNX2X_SP_RTNL_SETUP_TC,
1051 BNX2X_SP_RTNL_TX_TIMEOUT, 1083 BNX2X_SP_RTNL_TX_TIMEOUT,
1052}; 1084};
1053 1085
@@ -1226,6 +1258,10 @@ struct bnx2x {
1226#define BNX2X_STATE_ERROR 0xf000 1258#define BNX2X_STATE_ERROR 0xf000
1227 1259
1228 int multi_mode; 1260 int multi_mode;
1261#define BNX2X_MAX_PRIORITY 8
1262#define BNX2X_MAX_ENTRIES_PER_PRI 16
1263#define BNX2X_MAX_COS 3
1264#define BNX2X_MAX_TX_COS 2
1229 int num_queues; 1265 int num_queues;
1230 int disable_tpa; 1266 int disable_tpa;
1231 1267
@@ -1275,11 +1311,21 @@ struct bnx2x {
1275 struct bnx2x_ilt *ilt; 1311 struct bnx2x_ilt *ilt;
1276#define BP_ILT(bp) ((bp)->ilt) 1312#define BP_ILT(bp) ((bp)->ilt)
1277#define ILT_MAX_LINES 256 1313#define ILT_MAX_LINES 256
1314/*
1315 * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
1316 * to CNIC.
1317 */
1318#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT)
1278 1319
1279 int l2_cid_count; 1320/*
1280#define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \ 1321 * Maximum CID count that might be required by the bnx2x:
1281 ILT_PAGE_CIDS)) 1322 * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
1282#define BNX2X_DB_SIZE(bp) ((bp)->l2_cid_count * (1 << BNX2X_DB_SHIFT)) 1323 */
1324#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
1325 NON_ETH_CONTEXT_USE + CNIC_PRESENT)
1326#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
1327 ILT_PAGE_CIDS))
1328#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
1283 1329
1284 int qm_cid_count; 1330 int qm_cid_count;
1285 1331
@@ -1421,16 +1467,24 @@ struct bnx2x {
1421 u32 dcbx_remote_flags; 1467 u32 dcbx_remote_flags;
1422#endif 1468#endif
1423 u32 pending_max; 1469 u32 pending_max;
1470
1471 /* multiple tx classes of service */
1472 u8 max_cos;
1473
1474 /* priority to cos mapping */
1475 u8 prio_to_cos[8];
1424}; 1476};
1425 1477
1426/* Tx queues may be less or equal to Rx queues */ 1478/* Tx queues may be less or equal to Rx queues */
1427extern int num_queues; 1479extern int num_queues;
1428#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) 1480#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1429#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE) 1481#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
1482#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
1430 1483
1431#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) 1484#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
1432 1485
1433#define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE) 1486#define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp)
1487/* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */
1434 1488
1435#define RSS_IPV4_CAP_MASK \ 1489#define RSS_IPV4_CAP_MASK \
1436 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY 1490 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
@@ -1465,35 +1519,40 @@ struct bnx2x_func_init_params {
1465}; 1519};
1466 1520
1467#define for_each_eth_queue(bp, var) \ 1521#define for_each_eth_queue(bp, var) \
1468 for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++) 1522 for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
1469 1523
1470#define for_each_nondefault_eth_queue(bp, var) \ 1524#define for_each_nondefault_eth_queue(bp, var) \
1471 for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++) 1525 for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
1472 1526
1473#define for_each_queue(bp, var) \ 1527#define for_each_queue(bp, var) \
1474 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \ 1528 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
1475 if (skip_queue(bp, var)) \ 1529 if (skip_queue(bp, var)) \
1476 continue; \ 1530 continue; \
1477 else 1531 else
1478 1532
1533/* Skip forwarding FP */
1479#define for_each_rx_queue(bp, var) \ 1534#define for_each_rx_queue(bp, var) \
1480 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \ 1535 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
1481 if (skip_rx_queue(bp, var)) \ 1536 if (skip_rx_queue(bp, var)) \
1482 continue; \ 1537 continue; \
1483 else 1538 else
1484 1539
1540/* Skip OOO FP */
1485#define for_each_tx_queue(bp, var) \ 1541#define for_each_tx_queue(bp, var) \
1486 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \ 1542 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
1487 if (skip_tx_queue(bp, var)) \ 1543 if (skip_tx_queue(bp, var)) \
1488 continue; \ 1544 continue; \
1489 else 1545 else
1490 1546
1491#define for_each_nondefault_queue(bp, var) \ 1547#define for_each_nondefault_queue(bp, var) \
1492 for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) \ 1548 for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
1493 if (skip_queue(bp, var)) \ 1549 if (skip_queue(bp, var)) \
1494 continue; \ 1550 continue; \
1495 else 1551 else
1496 1552
1553#define for_each_cos_in_tx_queue(fp, var) \
1554 for ((var) = 0; (var) < (fp)->max_cos; (var)++)
1555
1497/* skip rx queue 1556/* skip rx queue
1498 * if FCOE l2 support is disabled and this is the fcoe L2 queue 1557 * if FCOE l2 support is disabled and this is the fcoe L2 queue
1499 */ 1558 */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 8763625b09d0..e5fac6244f4a 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -47,6 +47,25 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
47 47
48 /* Restore the NAPI object as it has been already initialized */ 48 /* Restore the NAPI object as it has been already initialized */
49 fp->napi = orig_napi; 49 fp->napi = orig_napi;
50
51 fp->bp = bp;
52 fp->index = index;
53 if (IS_ETH_FP(fp))
54 fp->max_cos = bp->max_cos;
55 else
56 /* Special queues support only one CoS */
57 fp->max_cos = 1;
58
59 /*
60 * set the tpa flag for each queue. The tpa flag determines the queue
61 * minimal size so it must be set prior to queue memory allocation
62 */
63 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
64
65#ifdef BCM_CNIC
66 /* We don't want TPA on FCoE, FWD and OOO L2 rings */
67 bnx2x_fcoe(bp, disable_tpa) = 1;
68#endif
50} 69}
51 70
52/** 71/**
@@ -77,10 +96,10 @@ int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
77/* free skb in the packet ring at pos idx 96/* free skb in the packet ring at pos idx
78 * return idx of last bd freed 97 * return idx of last bd freed
79 */ 98 */
80static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, 99static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
81 u16 idx) 100 u16 idx)
82{ 101{
83 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; 102 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
84 struct eth_tx_start_bd *tx_start_bd; 103 struct eth_tx_start_bd *tx_start_bd;
85 struct eth_tx_bd *tx_data_bd; 104 struct eth_tx_bd *tx_data_bd;
86 struct sk_buff *skb = tx_buf->skb; 105 struct sk_buff *skb = tx_buf->skb;
@@ -91,11 +110,11 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
91 prefetch(&skb->end); 110 prefetch(&skb->end);
92 111
93 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", 112 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
94 fp->index, idx, tx_buf, skb); 113 txdata->txq_index, idx, tx_buf, skb);
95 114
96 /* unmap first bd */ 115 /* unmap first bd */
97 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); 116 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
98 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd; 117 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
99 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), 118 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
100 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); 119 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
101 120
@@ -126,7 +145,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
126 while (nbd > 0) { 145 while (nbd > 0) {
127 146
128 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); 147 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
129 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd; 148 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
130 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), 149 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
131 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); 150 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
132 if (--nbd) 151 if (--nbd)
@@ -142,20 +161,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
142 return new_cons; 161 return new_cons;
143} 162}
144 163
145int bnx2x_tx_int(struct bnx2x_fastpath *fp) 164int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
146{ 165{
147 struct bnx2x *bp = fp->bp;
148 struct netdev_queue *txq; 166 struct netdev_queue *txq;
149 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons; 167 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
150 168
151#ifdef BNX2X_STOP_ON_ERROR 169#ifdef BNX2X_STOP_ON_ERROR
152 if (unlikely(bp->panic)) 170 if (unlikely(bp->panic))
153 return -1; 171 return -1;
154#endif 172#endif
155 173
156 txq = netdev_get_tx_queue(bp->dev, fp->index); 174 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
157 hw_cons = le16_to_cpu(*fp->tx_cons_sb); 175 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
158 sw_cons = fp->tx_pkt_cons; 176 sw_cons = txdata->tx_pkt_cons;
159 177
160 while (sw_cons != hw_cons) { 178 while (sw_cons != hw_cons) {
161 u16 pkt_cons; 179 u16 pkt_cons;
@@ -164,14 +182,14 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
164 182
165 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u " 183 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
166 " pkt_cons %u\n", 184 " pkt_cons %u\n",
167 fp->index, hw_cons, sw_cons, pkt_cons); 185 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
168 186
169 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); 187 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
170 sw_cons++; 188 sw_cons++;
171 } 189 }
172 190
173 fp->tx_pkt_cons = sw_cons; 191 txdata->tx_pkt_cons = sw_cons;
174 fp->tx_bd_cons = bd_cons; 192 txdata->tx_bd_cons = bd_cons;
175 193
176 /* Need to make the tx_bd_cons update visible to start_xmit() 194 /* Need to make the tx_bd_cons update visible to start_xmit()
177 * before checking for netif_tx_queue_stopped(). Without the 195 * before checking for netif_tx_queue_stopped(). Without the
@@ -199,7 +217,7 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
199 217
200 if ((netif_tx_queue_stopped(txq)) && 218 if ((netif_tx_queue_stopped(txq)) &&
201 (bp->state == BNX2X_STATE_OPEN) && 219 (bp->state == BNX2X_STATE_OPEN) &&
202 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) 220 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
203 netif_tx_wake_queue(txq); 221 netif_tx_wake_queue(txq);
204 222
205 __netif_tx_unlock(txq); 223 __netif_tx_unlock(txq);
@@ -777,6 +795,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
777{ 795{
778 struct bnx2x_fastpath *fp = fp_cookie; 796 struct bnx2x_fastpath *fp = fp_cookie;
779 struct bnx2x *bp = fp->bp; 797 struct bnx2x *bp = fp->bp;
798 u8 cos;
780 799
781 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB " 800 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
782 "[fp %d fw_sd %d igusb %d]\n", 801 "[fp %d fw_sd %d igusb %d]\n",
@@ -790,7 +809,10 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
790 809
791 /* Handle Rx and Tx according to MSI-X vector */ 810 /* Handle Rx and Tx according to MSI-X vector */
792 prefetch(fp->rx_cons_sb); 811 prefetch(fp->rx_cons_sb);
793 prefetch(fp->tx_cons_sb); 812
813 for_each_cos_in_tx_queue(fp, cos)
814 prefetch(fp->txdata[cos].tx_cons_sb);
815
794 prefetch(&fp->sb_running_index[SM_RX_ID]); 816 prefetch(&fp->sb_running_index[SM_RX_ID]);
795 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 817 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
796 818
@@ -1060,17 +1082,22 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1060static void bnx2x_free_tx_skbs(struct bnx2x *bp) 1082static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1061{ 1083{
1062 int i; 1084 int i;
1085 u8 cos;
1063 1086
1064 for_each_tx_queue(bp, i) { 1087 for_each_tx_queue(bp, i) {
1065 struct bnx2x_fastpath *fp = &bp->fp[i]; 1088 struct bnx2x_fastpath *fp = &bp->fp[i];
1089 for_each_cos_in_tx_queue(fp, cos) {
1090 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
1066 1091
1067 u16 bd_cons = fp->tx_bd_cons; 1092 u16 bd_cons = txdata->tx_bd_cons;
1068 u16 sw_prod = fp->tx_pkt_prod; 1093 u16 sw_prod = txdata->tx_pkt_prod;
1069 u16 sw_cons = fp->tx_pkt_cons; 1094 u16 sw_cons = txdata->tx_pkt_cons;
1070 1095
1071 while (sw_cons != sw_prod) { 1096 while (sw_cons != sw_prod) {
1072 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons)); 1097 bd_cons = bnx2x_free_tx_pkt(bp, txdata,
1073 sw_cons++; 1098 TX_BD(sw_cons));
1099 sw_cons++;
1100 }
1074 } 1101 }
1075 } 1102 }
1076} 1103}
@@ -1174,7 +1201,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
1174{ 1201{
1175 if (bp->flags & USING_MSIX_FLAG) 1202 if (bp->flags & USING_MSIX_FLAG)
1176 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + 1203 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1177 CNIC_CONTEXT_USE + 1); 1204 CNIC_PRESENT + 1);
1178 else if (bp->flags & USING_MSI_FLAG) 1205 else if (bp->flags & USING_MSI_FLAG)
1179 free_irq(bp->pdev->irq, bp->dev); 1206 free_irq(bp->pdev->irq, bp->dev);
1180 else 1207 else
@@ -1196,6 +1223,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1196 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); 1223 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1197 msix_vec++; 1224 msix_vec++;
1198#endif 1225#endif
1226 /* We need separate vectors for ETH queues only (not FCoE) */
1199 for_each_eth_queue(bp, i) { 1227 for_each_eth_queue(bp, i) {
1200 bp->msix_table[msix_vec].entry = msix_vec; 1228 bp->msix_table[msix_vec].entry = msix_vec;
1201 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " 1229 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
@@ -1203,7 +1231,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1203 msix_vec++; 1231 msix_vec++;
1204 } 1232 }
1205 1233
1206 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1; 1234 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
1207 1235
1208 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); 1236 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1209 1237
@@ -1278,7 +1306,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1278 } 1306 }
1279 1307
1280 i = BNX2X_NUM_ETH_QUEUES(bp); 1308 i = BNX2X_NUM_ETH_QUEUES(bp);
1281 offset = 1 + CNIC_CONTEXT_USE; 1309 offset = 1 + CNIC_PRESENT;
1282 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" 1310 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1283 " ... fp[%d] %d\n", 1311 " ... fp[%d] %d\n",
1284 bp->msix_table[0].vector, 1312 bp->msix_table[0].vector,
@@ -1393,13 +1421,12 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1393 1421
1394 /* If ethertype is FCoE or FIP - use FCoE ring */ 1422 /* If ethertype is FCoE or FIP - use FCoE ring */
1395 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) 1423 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1396 return bnx2x_fcoe(bp, index); 1424 return bnx2x_fcoe_tx(bp, txq_index);
1397 } 1425 }
1398#endif 1426#endif
1399 /* Select a non-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring 1427
1400 */ 1428 */
1401 return __skb_tx_hash(dev, skb, 1429 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1402 dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1403} 1430}
1404 1431
1405void bnx2x_set_num_queues(struct bnx2x *bp) 1432void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -1418,20 +1445,38 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
1418 } 1445 }
1419 1446
1420 /* Add special queues */ 1447 /* Add special queues */
1421 bp->num_queues += NONE_ETH_CONTEXT_USE; 1448 bp->num_queues += NON_ETH_CONTEXT_USE;
1422} 1449}
1423 1450
1424static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) 1451static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1425{ 1452{
1426 int rc, num = bp->num_queues; 1453 int rc, tx, rx;
1427 1454
1428#ifdef BCM_CNIC 1455 tx = MAX_TXQS_PER_COS * bp->max_cos;
1429 if (NO_FCOE(bp)) 1456 rx = BNX2X_NUM_ETH_QUEUES(bp);
1430 num -= FCOE_CONTEXT_USE;
1431 1457
1458/* account for fcoe queue */
1459#ifdef BCM_CNIC
1460 if (!NO_FCOE(bp)) {
1461 rx += FCOE_PRESENT;
1462 tx += FCOE_PRESENT;
1463 }
1432#endif 1464#endif
1433 netif_set_real_num_tx_queues(bp->dev, num); 1465
1434 rc = netif_set_real_num_rx_queues(bp->dev, num); 1466 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1467 if (rc) {
1468 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1469 return rc;
1470 }
1471 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1472 if (rc) {
1473 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1474 return rc;
1475 }
1476
1477 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1478 tx, rx);
1479
1435 return rc; 1480 return rc;
1436} 1481}
1437 1482
@@ -1661,28 +1706,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1661 /* must be called before memory allocation and HW init */ 1706 /* must be called before memory allocation and HW init */
1662 bnx2x_ilt_set_info(bp); 1707 bnx2x_ilt_set_info(bp);
1663 1708
1664 /* zero fastpath structures preserving invariants like napi which are 1709 /*
1665 * allocated only once 1710 * Zero fastpath structures preserving invariants like napi, which are
1711 * allocated only once, fp index, max_cos, bp pointer.
1712 * Also set fp->disable_tpa.
1666 */ 1713 */
1667 for_each_queue(bp, i) 1714 for_each_queue(bp, i)
1668 bnx2x_bz_fp(bp, i); 1715 bnx2x_bz_fp(bp, i);
1669 1716
1717
1670 /* Set the receive queues buffer size */ 1718 /* Set the receive queues buffer size */
1671 bnx2x_set_rx_buf_size(bp); 1719 bnx2x_set_rx_buf_size(bp);
1672 1720
1673 /*
1674 * set the tpa flag for each queue. The tpa flag determines the queue
1675 * minimal size so it must be set prior to queue memory allocation
1676 */
1677 for_each_queue(bp, i)
1678 bnx2x_fp(bp, i, disable_tpa) =
1679 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1680
1681#ifdef BCM_CNIC
1682 /* We don't want TPA on FCoE L2 ring */
1683 bnx2x_fcoe(bp, disable_tpa) = 1;
1684#endif
1685
1686 if (bnx2x_alloc_mem(bp)) 1721 if (bnx2x_alloc_mem(bp))
1687 return -ENOMEM; 1722 return -ENOMEM;
1688 1723
@@ -1696,6 +1731,12 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1696 LOAD_ERROR_EXIT(bp, load_error0); 1731 LOAD_ERROR_EXIT(bp, load_error0);
1697 } 1732 }
1698 1733
1734 /* configure multi cos mappings in kernel.
1735 * this configuration may be overridden by a multi class queue discipline
1736 * or by a dcbx negotiation result.
1737 */
1738 bnx2x_setup_tc(bp->dev, bp->max_cos);
1739
1699 bnx2x_napi_enable(bp); 1740 bnx2x_napi_enable(bp);
1700 1741
1701 /* Send LOAD_REQUEST command to MCP 1742 /* Send LOAD_REQUEST command to MCP
@@ -1747,6 +1788,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1747 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 1788 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1748 } else 1789 } else
1749 bp->port.pmf = 0; 1790 bp->port.pmf = 0;
1791
1750 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); 1792 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1751 1793
1752 /* Init Function state controlling object */ 1794 /* Init Function state controlling object */
@@ -2089,6 +2131,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2089int bnx2x_poll(struct napi_struct *napi, int budget) 2131int bnx2x_poll(struct napi_struct *napi, int budget)
2090{ 2132{
2091 int work_done = 0; 2133 int work_done = 0;
2134 u8 cos;
2092 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, 2135 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2093 napi); 2136 napi);
2094 struct bnx2x *bp = fp->bp; 2137 struct bnx2x *bp = fp->bp;
@@ -2101,8 +2144,10 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
2101 } 2144 }
2102#endif 2145#endif
2103 2146
2104 if (bnx2x_has_tx_work(fp)) 2147 for_each_cos_in_tx_queue(fp, cos)
2105 bnx2x_tx_int(fp); 2148 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2149 bnx2x_tx_int(bp, &fp->txdata[cos]);
2150
2106 2151
2107 if (bnx2x_has_rx_work(fp)) { 2152 if (bnx2x_has_rx_work(fp)) {
2108 work_done += bnx2x_rx_int(fp, budget - work_done); 2153 work_done += bnx2x_rx_int(fp, budget - work_done);
@@ -2164,7 +2209,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
2164 * in Other Operating Systems(TM) 2209 * in Other Operating Systems(TM)
2165 */ 2210 */
2166static noinline u16 bnx2x_tx_split(struct bnx2x *bp, 2211static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2167 struct bnx2x_fastpath *fp, 2212 struct bnx2x_fp_txdata *txdata,
2168 struct sw_tx_bd *tx_buf, 2213 struct sw_tx_bd *tx_buf,
2169 struct eth_tx_start_bd **tx_bd, u16 hlen, 2214 struct eth_tx_start_bd **tx_bd, u16 hlen,
2170 u16 bd_prod, int nbd) 2215 u16 bd_prod, int nbd)
@@ -2185,7 +2230,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2185 /* now get a new data BD 2230 /* now get a new data BD
2186 * (after the pbd) and fill it */ 2231 * (after the pbd) and fill it */
2187 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 2232 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2188 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd; 2233 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2189 2234
2190 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), 2235 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2191 le32_to_cpu(h_tx_bd->addr_lo)) + hlen; 2236 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
@@ -2481,8 +2526,10 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2481netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) 2526netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2482{ 2527{
2483 struct bnx2x *bp = netdev_priv(dev); 2528 struct bnx2x *bp = netdev_priv(dev);
2529
2484 struct bnx2x_fastpath *fp; 2530 struct bnx2x_fastpath *fp;
2485 struct netdev_queue *txq; 2531 struct netdev_queue *txq;
2532 struct bnx2x_fp_txdata *txdata;
2486 struct sw_tx_bd *tx_buf; 2533 struct sw_tx_bd *tx_buf;
2487 struct eth_tx_start_bd *tx_start_bd, *first_bd; 2534 struct eth_tx_start_bd *tx_start_bd, *first_bd;
2488 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; 2535 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
@@ -2490,7 +2537,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2490 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 2537 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2491 u32 pbd_e2_parsing_data = 0; 2538 u32 pbd_e2_parsing_data = 0;
2492 u16 pkt_prod, bd_prod; 2539 u16 pkt_prod, bd_prod;
2493 int nbd, fp_index; 2540 int nbd, txq_index, fp_index, txdata_index;
2494 dma_addr_t mapping; 2541 dma_addr_t mapping;
2495 u32 xmit_type = bnx2x_xmit_type(bp, skb); 2542 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2496 int i; 2543 int i;
@@ -2504,12 +2551,43 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2504 return NETDEV_TX_BUSY; 2551 return NETDEV_TX_BUSY;
2505#endif 2552#endif
2506 2553
2507 fp_index = skb_get_queue_mapping(skb); 2554 txq_index = skb_get_queue_mapping(skb);
2508 txq = netdev_get_tx_queue(dev, fp_index); 2555 txq = netdev_get_tx_queue(dev, txq_index);
2556
2557 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2558
2559 /* decode the fastpath index and the cos index from the txq */
2560 fp_index = TXQ_TO_FP(txq_index);
2561 txdata_index = TXQ_TO_COS(txq_index);
2562
2563#ifdef BCM_CNIC
2564 /*
2565 * Override the above for the FCoE queue:
2566 * - FCoE fp entry is right after the ETH entries.
2567 * - FCoE L2 queue uses bp->txdata[0] only.
2568 */
2569 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2570 bnx2x_fcoe_tx(bp, txq_index)))) {
2571 fp_index = FCOE_IDX;
2572 txdata_index = 0;
2573 }
2574#endif
2575
2576 /* enable this debug print to view the transmission queue being used
2577 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d",
2578 txq_index, fp_index, txdata_index); */
2509 2579
2580 /* locate the fastpath and the txdata */
2510 fp = &bp->fp[fp_index]; 2581 fp = &bp->fp[fp_index];
2582 txdata = &fp->txdata[txdata_index];
2583
2584 /* enable this debug print to view the tranmission details
2585 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
2586 " tx_data ptr %p fp pointer %p",
2587 txdata->cid, fp_index, txdata_index, txdata, fp); */
2511 2588
2512 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { 2589 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2590 (skb_shinfo(skb)->nr_frags + 3))) {
2513 fp->eth_q_stats.driver_xoff++; 2591 fp->eth_q_stats.driver_xoff++;
2514 netif_tx_stop_queue(txq); 2592 netif_tx_stop_queue(txq);
2515 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 2593 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
@@ -2518,7 +2596,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2518 2596
2519 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x " 2597 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2520 "protocol(%x,%x) gso type %x xmit_type %x\n", 2598 "protocol(%x,%x) gso type %x xmit_type %x\n",
2521 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 2599 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2522 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 2600 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2523 2601
2524 eth = (struct ethhdr *)skb->data; 2602 eth = (struct ethhdr *)skb->data;
@@ -2567,15 +2645,15 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2567 /* get current pkt produced now - advance it just before sending packet 2645 /* get current pkt produced now - advance it just before sending packet
2568 * since mapping of pages may fail and cause packet to be dropped 2646 * since mapping of pages may fail and cause packet to be dropped
2569 */ 2647 */
2570 pkt_prod = fp->tx_pkt_prod; 2648 pkt_prod = txdata->tx_pkt_prod;
2571 bd_prod = TX_BD(fp->tx_bd_prod); 2649 bd_prod = TX_BD(txdata->tx_bd_prod);
2572 2650
2573 /* get a tx_buf and first BD 2651 /* get a tx_buf and first BD
2574 * tx_start_bd may be changed during SPLIT, 2652 * tx_start_bd may be changed during SPLIT,
2575 * but first_bd will always stay first 2653 * but first_bd will always stay first
2576 */ 2654 */
2577 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; 2655 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2578 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; 2656 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2579 first_bd = tx_start_bd; 2657 first_bd = tx_start_bd;
2580 2658
2581 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 2659 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
@@ -2586,13 +2664,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2586 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); 2664 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2587 2665
2588 /* remember the first BD of the packet */ 2666 /* remember the first BD of the packet */
2589 tx_buf->first_bd = fp->tx_bd_prod; 2667 tx_buf->first_bd = txdata->tx_bd_prod;
2590 tx_buf->skb = skb; 2668 tx_buf->skb = skb;
2591 tx_buf->flags = 0; 2669 tx_buf->flags = 0;
2592 2670
2593 DP(NETIF_MSG_TX_QUEUED, 2671 DP(NETIF_MSG_TX_QUEUED,
2594 "sending pkt %u @%p next_idx %u bd %u @%p\n", 2672 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2595 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); 2673 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2596 2674
2597 if (vlan_tx_tag_present(skb)) { 2675 if (vlan_tx_tag_present(skb)) {
2598 tx_start_bd->vlan_or_ethertype = 2676 tx_start_bd->vlan_or_ethertype =
@@ -2609,7 +2687,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2609 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); 2687 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2610 2688
2611 if (!CHIP_IS_E1x(bp)) { 2689 if (!CHIP_IS_E1x(bp)) {
2612 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2; 2690 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2613 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 2691 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2614 /* Set PBD in checksum offload case */ 2692 /* Set PBD in checksum offload case */
2615 if (xmit_type & XMIT_CSUM) 2693 if (xmit_type & XMIT_CSUM)
@@ -2631,7 +2709,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2631 eth->h_dest); 2709 eth->h_dest);
2632 } 2710 }
2633 } else { 2711 } else {
2634 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x; 2712 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
2635 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 2713 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2636 /* Set PBD in checksum offload case */ 2714 /* Set PBD in checksum offload case */
2637 if (xmit_type & XMIT_CSUM) 2715 if (xmit_type & XMIT_CSUM)
@@ -2663,8 +2741,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2663 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; 2741 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2664 2742
2665 if (unlikely(skb_headlen(skb) > hlen)) 2743 if (unlikely(skb_headlen(skb) > hlen))
2666 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, 2744 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2667 hlen, bd_prod, ++nbd); 2745 &tx_start_bd, hlen,
2746 bd_prod, ++nbd);
2668 if (!CHIP_IS_E1x(bp)) 2747 if (!CHIP_IS_E1x(bp))
2669 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, 2748 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2670 xmit_type); 2749 xmit_type);
@@ -2698,14 +2777,15 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2698 * before call to bnx2x_free_tx_pkt 2777 * before call to bnx2x_free_tx_pkt
2699 */ 2778 */
2700 first_bd->nbd = cpu_to_le16(nbd); 2779 first_bd->nbd = cpu_to_le16(nbd);
2701 bnx2x_free_tx_pkt(bp, fp, TX_BD(fp->tx_pkt_prod)); 2780 bnx2x_free_tx_pkt(bp, txdata,
2781 TX_BD(txdata->tx_pkt_prod));
2702 return NETDEV_TX_OK; 2782 return NETDEV_TX_OK;
2703 } 2783 }
2704 2784
2705 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 2785 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2706 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd; 2786 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2707 if (total_pkt_bd == NULL) 2787 if (total_pkt_bd == NULL)
2708 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; 2788 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2709 2789
2710 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 2790 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2711 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 2791 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -2759,7 +2839,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2759 pbd_e2->parsing_data); 2839 pbd_e2->parsing_data);
2760 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); 2840 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2761 2841
2762 fp->tx_pkt_prod++; 2842 txdata->tx_pkt_prod++;
2763 /* 2843 /*
2764 * Make sure that the BD data is updated before updating the producer 2844 * Make sure that the BD data is updated before updating the producer
2765 * since FW might read the BD right after the producer is updated. 2845 * since FW might read the BD right after the producer is updated.
@@ -2769,16 +2849,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2769 */ 2849 */
2770 wmb(); 2850 wmb();
2771 2851
2772 fp->tx_db.data.prod += nbd; 2852 txdata->tx_db.data.prod += nbd;
2773 barrier(); 2853 barrier();
2774 2854
2775 DOORBELL(bp, fp->cid, fp->tx_db.raw); 2855 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
2776 2856
2777 mmiowb(); 2857 mmiowb();
2778 2858
2779 fp->tx_bd_prod += nbd; 2859 txdata->tx_bd_prod += nbd;
2780 2860
2781 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { 2861 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
2782 netif_tx_stop_queue(txq); 2862 netif_tx_stop_queue(txq);
2783 2863
2784 /* paired memory barrier is in bnx2x_tx_int(), we have to keep 2864 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -2787,14 +2867,81 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2787 smp_mb(); 2867 smp_mb();
2788 2868
2789 fp->eth_q_stats.driver_xoff++; 2869 fp->eth_q_stats.driver_xoff++;
2790 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) 2870 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
2791 netif_tx_wake_queue(txq); 2871 netif_tx_wake_queue(txq);
2792 } 2872 }
2793 fp->tx_pkt++; 2873 txdata->tx_pkt++;
2794 2874
2795 return NETDEV_TX_OK; 2875 return NETDEV_TX_OK;
2796} 2876}
2797 2877
2878/**
2879 * bnx2x_setup_tc - routine to configure net_device for multi tc
2880 *
2881 * @netdev: net device to configure
2882 * @tc: number of traffic classes to enable
2883 *
2884 * callback connected to the ndo_setup_tc function pointer
2885 */
2886int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2887{
2888 int cos, prio, count, offset;
2889 struct bnx2x *bp = netdev_priv(dev);
2890
2891 /* setup tc must be called under rtnl lock */
2892 ASSERT_RTNL();
2893
2894 /* no traffic classes requested. aborting */
2895 if (!num_tc) {
2896 netdev_reset_tc(dev);
2897 return 0;
2898 }
2899
2900 /* requested to support too many traffic classes */
2901 if (num_tc > bp->max_cos) {
2902 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
2903 " requested: %d. max supported is %d",
2904 num_tc, bp->max_cos);
2905 return -EINVAL;
2906 }
2907
2908 /* declare amount of supported traffic classes */
2909 if (netdev_set_num_tc(dev, num_tc)) {
2910 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes",
2911 num_tc);
2912 return -EINVAL;
2913 }
2914
2915 /* configure priority to traffic class mapping */
2916 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2917 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
2918 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d",
2919 prio, bp->prio_to_cos[prio]);
2920 }
2921
2922
2923 /* Use this configuration to diffrentiate tc0 from other COSes
2924 This can be used for ets or pfc, and save the effort of setting
2925 up a multio class queue disc or negotiating DCBX with a switch
2926 netdev_set_prio_tc_map(dev, 0, 0);
2927 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0);
2928 for (prio = 1; prio < 16; prio++) {
2929 netdev_set_prio_tc_map(dev, prio, 1);
2930 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1);
2931 } */
2932
2933 /* configure traffic class to transmission queue mapping */
2934 for (cos = 0; cos < bp->max_cos; cos++) {
2935 count = BNX2X_NUM_ETH_QUEUES(bp);
2936 offset = cos * MAX_TXQS_PER_COS;
2937 netdev_set_tc_queue(dev, cos, count, offset);
2938 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d",
2939 cos, offset, count);
2940 }
2941
2942 return 0;
2943}
2944
2798/* called with rtnl_lock */ 2945/* called with rtnl_lock */
2799int bnx2x_change_mac_addr(struct net_device *dev, void *p) 2946int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2800{ 2947{
@@ -2823,6 +2970,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2823{ 2970{
2824 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); 2971 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2825 struct bnx2x_fastpath *fp = &bp->fp[fp_index]; 2972 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2973 u8 cos;
2826 2974
2827 /* Common */ 2975 /* Common */
2828#ifdef BCM_CNIC 2976#ifdef BCM_CNIC
@@ -2871,10 +3019,18 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2871 /* Tx */ 3019 /* Tx */
2872 if (!skip_tx_queue(bp, fp_index)) { 3020 if (!skip_tx_queue(bp, fp_index)) {
2873 /* fastpath tx rings: tx_buf tx_desc */ 3021 /* fastpath tx rings: tx_buf tx_desc */
2874 BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring)); 3022 for_each_cos_in_tx_queue(fp, cos) {
2875 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring), 3023 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
2876 bnx2x_fp(bp, fp_index, tx_desc_mapping), 3024
2877 sizeof(union eth_tx_bd_types) * NUM_TX_BD); 3025 DP(BNX2X_MSG_SP,
3026 "freeing tx memory of fp %d cos %d cid %d",
3027 fp_index, cos, txdata->cid);
3028
3029 BNX2X_FREE(txdata->tx_buf_ring);
3030 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3031 txdata->tx_desc_mapping,
3032 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3033 }
2878 } 3034 }
2879 /* end of fastpath */ 3035 /* end of fastpath */
2880} 3036}
@@ -2907,19 +3063,17 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
2907 union host_hc_status_block *sb; 3063 union host_hc_status_block *sb;
2908 struct bnx2x_fastpath *fp = &bp->fp[index]; 3064 struct bnx2x_fastpath *fp = &bp->fp[index];
2909 int ring_size = 0; 3065 int ring_size = 0;
3066 u8 cos;
2910 3067
2911 /* if rx_ring_size specified - use it */ 3068 /* if rx_ring_size specified - use it */
2912 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : 3069 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
2913 MAX_RX_AVAIL/bp->num_queues; 3070 MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
2914 3071
2915 /* allocate at least number of buffers required by FW */ 3072 /* allocate at least number of buffers required by FW */
2916 rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA : 3073 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
2917 MIN_RX_SIZE_TPA, 3074 MIN_RX_SIZE_TPA,
2918 rx_ring_size); 3075 rx_ring_size);
2919 3076
2920 bnx2x_fp(bp, index, bp) = bp;
2921 bnx2x_fp(bp, index, index) = index;
2922
2923 /* Common */ 3077 /* Common */
2924 sb = &bnx2x_fp(bp, index, status_blk); 3078 sb = &bnx2x_fp(bp, index, status_blk);
2925#ifdef BCM_CNIC 3079#ifdef BCM_CNIC
@@ -2947,11 +3101,19 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
2947 /* Tx */ 3101 /* Tx */
2948 if (!skip_tx_queue(bp, index)) { 3102 if (!skip_tx_queue(bp, index)) {
2949 /* fastpath tx rings: tx_buf tx_desc */ 3103 /* fastpath tx rings: tx_buf tx_desc */
2950 BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring), 3104 for_each_cos_in_tx_queue(fp, cos) {
3105 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3106
3107 DP(BNX2X_MSG_SP, "allocating tx memory of "
3108 "fp %d cos %d",
3109 index, cos);
3110
3111 BNX2X_ALLOC(txdata->tx_buf_ring,
2951 sizeof(struct sw_tx_bd) * NUM_TX_BD); 3112 sizeof(struct sw_tx_bd) * NUM_TX_BD);
2952 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring), 3113 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
2953 &bnx2x_fp(bp, index, tx_desc_mapping), 3114 &txdata->tx_desc_mapping,
2954 sizeof(union eth_tx_bd_types) * NUM_TX_BD); 3115 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3116 }
2955 } 3117 }
2956 3118
2957 /* Rx */ 3119 /* Rx */
@@ -2994,7 +3156,7 @@ alloc_mem_err:
2994 index, ring_size); 3156 index, ring_size);
2995 /* FW will drop all packets if queue is not big enough, 3157 /* FW will drop all packets if queue is not big enough,
2996 * In these cases we disable the queue 3158 * In these cases we disable the queue
2997 * Min size diferent for TPA and non-TPA queues 3159 * Min size is different for OOO, TPA and non-TPA queues
2998 */ 3160 */
2999 if (ring_size < (fp->disable_tpa ? 3161 if (ring_size < (fp->disable_tpa ?
3000 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { 3162 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
@@ -3012,12 +3174,14 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3012 /** 3174 /**
3013 * 1. Allocate FP for leading - fatal if error 3175 * 1. Allocate FP for leading - fatal if error
3014 * 2. {CNIC} Allocate FCoE FP - fatal if error 3176 * 2. {CNIC} Allocate FCoE FP - fatal if error
3015 * 3. Allocate RSS - fix number of queues if error 3177 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3178 * 4. Allocate RSS - fix number of queues if error
3016 */ 3179 */
3017 3180
3018 /* leading */ 3181 /* leading */
3019 if (bnx2x_alloc_fp_mem_at(bp, 0)) 3182 if (bnx2x_alloc_fp_mem_at(bp, 0))
3020 return -ENOMEM; 3183 return -ENOMEM;
3184
3021#ifdef BCM_CNIC 3185#ifdef BCM_CNIC
3022 if (!NO_FCOE(bp)) 3186 if (!NO_FCOE(bp))
3023 /* FCoE */ 3187 /* FCoE */
@@ -3027,6 +3191,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3027 */ 3191 */
3028 return -ENOMEM; 3192 return -ENOMEM;
3029#endif 3193#endif
3194
3030 /* RSS */ 3195 /* RSS */
3031 for_each_nondefault_eth_queue(bp, i) 3196 for_each_nondefault_eth_queue(bp, i)
3032 if (bnx2x_alloc_fp_mem_at(bp, i)) 3197 if (bnx2x_alloc_fp_mem_at(bp, i))
@@ -3044,7 +3209,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3044 * FCOE_IDX < FWD_IDX < OOO_IDX 3209 * FCOE_IDX < FWD_IDX < OOO_IDX
3045 */ 3210 */
3046 3211
3047 /* move FCoE fp */ 3212 /* move FCoE fp even NO_FCOE_FLAG is on */
3048 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta); 3213 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3049#endif 3214#endif
3050 bp->num_queues -= delta; 3215 bp->num_queues -= delta;
@@ -3067,16 +3232,23 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3067 struct bnx2x_fastpath *fp; 3232 struct bnx2x_fastpath *fp;
3068 struct msix_entry *tbl; 3233 struct msix_entry *tbl;
3069 struct bnx2x_ilt *ilt; 3234 struct bnx2x_ilt *ilt;
3235 int msix_table_size = 0;
3236
3237 /*
3238 * The biggest MSI-X table we might need is as a maximum number of fast
3239 * path IGU SBs plus default SB (for PF).
3240 */
3241 msix_table_size = bp->igu_sb_cnt + 1;
3070 3242
3071 /* fp array */ 3243 /* fp array: RSS plus CNIC related L2 queues */
3072 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL); 3244 fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
3245 sizeof(*fp), GFP_KERNEL);
3073 if (!fp) 3246 if (!fp)
3074 goto alloc_err; 3247 goto alloc_err;
3075 bp->fp = fp; 3248 bp->fp = fp;
3076 3249
3077 /* msix table */ 3250 /* msix table */
3078 tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl), 3251 tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
3079 GFP_KERNEL);
3080 if (!tbl) 3252 if (!tbl)
3081 goto alloc_err; 3253 goto alloc_err;
3082 bp->msix_table = tbl; 3254 bp->msix_table = tbl;
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index c016e20c5c2b..595d4cdada3e 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -439,6 +439,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
439/* hard_xmit callback */ 439/* hard_xmit callback */
440netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); 440netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
441 441
442/* setup_tc callback */
443int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
444
442/* select_queue callback */ 445/* select_queue callback */
443u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); 446u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
444 447
@@ -454,7 +457,7 @@ void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
454 u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod); 457 u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
455 458
456/* NAPI poll Tx part */ 459/* NAPI poll Tx part */
457int bnx2x_tx_int(struct bnx2x_fastpath *fp); 460int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
458 461
459/* suspend/resume callbacks */ 462/* suspend/resume callbacks */
460int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state); 463int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
@@ -715,21 +718,22 @@ static inline u16 bnx2x_ack_int(struct bnx2x *bp)
715 return bnx2x_igu_ack_int(bp); 718 return bnx2x_igu_ack_int(bp);
716} 719}
717 720
718static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) 721static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
719{ 722{
720 /* Tell compiler that consumer and producer can change */ 723 /* Tell compiler that consumer and producer can change */
721 barrier(); 724 barrier();
722 return fp->tx_pkt_prod != fp->tx_pkt_cons; 725 return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
723} 726}
724 727
725static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) 728static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
729 struct bnx2x_fp_txdata *txdata)
726{ 730{
727 s16 used; 731 s16 used;
728 u16 prod; 732 u16 prod;
729 u16 cons; 733 u16 cons;
730 734
731 prod = fp->tx_bd_prod; 735 prod = txdata->tx_bd_prod;
732 cons = fp->tx_bd_cons; 736 cons = txdata->tx_bd_cons;
733 737
734 /* NUM_TX_RINGS = number of "next-page" entries 738 /* NUM_TX_RINGS = number of "next-page" entries
735 It will be used as a threshold */ 739 It will be used as a threshold */
@@ -737,21 +741,30 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
737 741
738#ifdef BNX2X_STOP_ON_ERROR 742#ifdef BNX2X_STOP_ON_ERROR
739 WARN_ON(used < 0); 743 WARN_ON(used < 0);
740 WARN_ON(used > fp->bp->tx_ring_size); 744 WARN_ON(used > bp->tx_ring_size);
741 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL); 745 WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
742#endif 746#endif
743 747
744 return (s16)(fp->bp->tx_ring_size) - used; 748 return (s16)(bp->tx_ring_size) - used;
745} 749}
746 750
747static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp) 751static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
748{ 752{
749 u16 hw_cons; 753 u16 hw_cons;
750 754
751 /* Tell compiler that status block fields can change */ 755 /* Tell compiler that status block fields can change */
752 barrier(); 756 barrier();
753 hw_cons = le16_to_cpu(*fp->tx_cons_sb); 757 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
754 return hw_cons != fp->tx_pkt_cons; 758 return hw_cons != txdata->tx_pkt_cons;
759}
760
761static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
762{
763 u8 cos;
764 for_each_cos_in_tx_queue(fp, cos)
765 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
766 return true;
767 return false;
755} 768}
756 769
757static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) 770static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
@@ -963,7 +976,10 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
963 /* Function parameters */ 976 /* Function parameters */
964 start_params->mf_mode = bp->mf_mode; 977 start_params->mf_mode = bp->mf_mode;
965 start_params->sd_vlan_tag = bp->mf_ov; 978 start_params->sd_vlan_tag = bp->mf_ov;
979 if (CHIP_IS_E1x(bp))
966 start_params->network_cos_mode = OVERRIDE_COS; 980 start_params->network_cos_mode = OVERRIDE_COS;
981 else
982 start_params->network_cos_mode = STATIC_COS;
967 983
968 return bnx2x_func_state_change(bp, &func_params); 984 return bnx2x_func_state_change(bp, &func_params);
969} 985}
@@ -1023,39 +1039,41 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
1023 } 1039 }
1024} 1040}
1025 1041
1026static inline void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp) 1042static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
1027{ 1043{
1028 int i; 1044 int i;
1029 1045
1030 for (i = 1; i <= NUM_TX_RINGS; i++) { 1046 for (i = 1; i <= NUM_TX_RINGS; i++) {
1031 struct eth_tx_next_bd *tx_next_bd = 1047 struct eth_tx_next_bd *tx_next_bd =
1032 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; 1048 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
1033 1049
1034 tx_next_bd->addr_hi = 1050 tx_next_bd->addr_hi =
1035 cpu_to_le32(U64_HI(fp->tx_desc_mapping + 1051 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
1036 BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); 1052 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
1037 tx_next_bd->addr_lo = 1053 tx_next_bd->addr_lo =
1038 cpu_to_le32(U64_LO(fp->tx_desc_mapping + 1054 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
1039 BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); 1055 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
1040 } 1056 }
1041 1057
1042 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); 1058 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
1043 fp->tx_db.data.zero_fill1 = 0; 1059 txdata->tx_db.data.zero_fill1 = 0;
1044 fp->tx_db.data.prod = 0; 1060 txdata->tx_db.data.prod = 0;
1045 1061
1046 fp->tx_pkt_prod = 0; 1062 txdata->tx_pkt_prod = 0;
1047 fp->tx_pkt_cons = 0; 1063 txdata->tx_pkt_cons = 0;
1048 fp->tx_bd_prod = 0; 1064 txdata->tx_bd_prod = 0;
1049 fp->tx_bd_cons = 0; 1065 txdata->tx_bd_cons = 0;
1050 fp->tx_pkt = 0; 1066 txdata->tx_pkt = 0;
1051} 1067}
1052 1068
1053static inline void bnx2x_init_tx_rings(struct bnx2x *bp) 1069static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
1054{ 1070{
1055 int i; 1071 int i;
1072 u8 cos;
1056 1073
1057 for_each_tx_queue(bp, i) 1074 for_each_tx_queue(bp, i)
1058 bnx2x_init_tx_ring_one(&bp->fp[i]); 1075 for_each_cos_in_tx_queue(&bp->fp[i], cos)
1076 bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
1059} 1077}
1060 1078
1061static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) 1079static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
@@ -1257,12 +1275,23 @@ static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
1257 return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); 1275 return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
1258} 1276}
1259 1277
1278static inline void bnx2x_init_txdata(struct bnx2x *bp,
1279 struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
1280 __le16 *tx_cons_sb)
1281{
1282 txdata->cid = cid;
1283 txdata->txq_index = txq_index;
1284 txdata->tx_cons_sb = tx_cons_sb;
1285
1286 DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d",
1287 txdata->cid, txdata->txq_index);
1288}
1260 1289
1261#ifdef BCM_CNIC 1290#ifdef BCM_CNIC
1262static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) 1291static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
1263{ 1292{
1264 return bp->cnic_base_cl_id + cl_idx + 1293 return bp->cnic_base_cl_id + cl_idx +
1265 (bp->pf_num >> 1) * NONE_ETH_CONTEXT_USE; 1294 (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE;
1266} 1295}
1267 1296
1268static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) 1297static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
@@ -1293,10 +1322,13 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
1293 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; 1322 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
1294 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; 1323 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
1295 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; 1324 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
1296 bnx2x_fcoe(bp, bp) = bp;
1297 bnx2x_fcoe(bp, index) = FCOE_IDX;
1298 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; 1325 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
1299 bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX; 1326
1327 bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
1328 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
1329
1330 DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)", fp->index);
1331
1300 /* qZone id equals to FW (per path) client id */ 1332 /* qZone id equals to FW (per path) client id */
1301 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); 1333 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
1302 /* init shortcut */ 1334 /* init shortcut */
@@ -1306,9 +1338,13 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
1306 /* Configure Queue State object */ 1338 /* Configure Queue State object */
1307 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 1339 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
1308 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 1340 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
1309 bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid, BP_FUNC(bp), 1341
1310 bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata), 1342 /* No multi-CoS for FCoE L2 client */
1311 q_type); 1343 BUG_ON(fp->max_cos != 1);
1344
1345 bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
1346 BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
1347 bnx2x_sp_mapping(bp, q_rdata), q_type);
1312 1348
1313 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d " 1349 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
1314 "igu_sb %d\n", 1350 "igu_sb %d\n",
@@ -1318,15 +1354,16 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
1318#endif 1354#endif
1319 1355
1320static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, 1356static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
1321 struct bnx2x_fastpath *fp) 1357 struct bnx2x_fp_txdata *txdata)
1322{ 1358{
1323 int cnt = 1000; 1359 int cnt = 1000;
1324 1360
1325 while (bnx2x_has_tx_work_unload(fp)) { 1361 while (bnx2x_has_tx_work_unload(txdata)) {
1326 if (!cnt) { 1362 if (!cnt) {
1327 BNX2X_ERR("timeout waiting for queue[%d]: " 1363 BNX2X_ERR("timeout waiting for queue[%d]: "
1328 "fp->tx_pkt_prod(%d) != fp->tx_pkt_cons(%d)\n", 1364 "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
1329 fp->index, fp->tx_pkt_prod, fp->tx_pkt_cons); 1365 txdata->txq_index, txdata->tx_pkt_prod,
1366 txdata->tx_pkt_cons);
1330#ifdef BNX2X_STOP_ON_ERROR 1367#ifdef BNX2X_STOP_ON_ERROR
1331 bnx2x_panic(); 1368 bnx2x_panic();
1332 return -EBUSY; 1369 return -EBUSY;
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index 45cf3ceef144..3bfba44961d3 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -653,6 +653,26 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
653 } 653 }
654} 654}
655 655
656static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
657{
658 u8 prio, cos;
659 for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) {
660 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
661 if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
662 & (1 << prio)) {
663 bp->prio_to_cos[prio] = cos;
664 }
665 }
666 }
667
668 /* setup tc must be called under rtnl lock, but we can't take it here
669 * as we are handling an attetntion on a work queue which must be
670 * flushed at some rtnl-locked contexts (e.g. if down)
671 */
672 if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
673 schedule_delayed_work(&bp->sp_rtnl_task, 0);
674}
675
656void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) 676void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
657{ 677{
658 switch (state) { 678 switch (state) {
@@ -690,6 +710,11 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
690#endif 710#endif
691 bnx2x_dcbx_stop_hw_tx(bp); 711 bnx2x_dcbx_stop_hw_tx(bp);
692 712
713 /* reconfigure the netdevice with the results of the new
714 * dcbx negotiation.
715 */
716 bnx2x_dcbx_update_tc_mapping(bp);
717
693 return; 718 return;
694 } 719 }
695 case BNX2X_DCBX_STATE_TX_PAUSED: 720 case BNX2X_DCBX_STATE_TX_PAUSED:
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 1a3ed418946d..ac0223135b7c 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -1616,6 +1616,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
1616 unsigned char *packet; 1616 unsigned char *packet;
1617 struct bnx2x_fastpath *fp_rx = &bp->fp[0]; 1617 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
1618 struct bnx2x_fastpath *fp_tx = &bp->fp[0]; 1618 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
1619 struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
1619 u16 tx_start_idx, tx_idx; 1620 u16 tx_start_idx, tx_idx;
1620 u16 rx_start_idx, rx_idx; 1621 u16 rx_start_idx, rx_idx;
1621 u16 pkt_prod, bd_prod, rx_comp_cons; 1622 u16 pkt_prod, bd_prod, rx_comp_cons;
@@ -1670,17 +1671,17 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
1670 1671
1671 /* send the loopback packet */ 1672 /* send the loopback packet */
1672 num_pkts = 0; 1673 num_pkts = 0;
1673 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb); 1674 tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
1674 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb); 1675 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
1675 1676
1676 pkt_prod = fp_tx->tx_pkt_prod++; 1677 pkt_prod = txdata->tx_pkt_prod++;
1677 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)]; 1678 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
1678 tx_buf->first_bd = fp_tx->tx_bd_prod; 1679 tx_buf->first_bd = txdata->tx_bd_prod;
1679 tx_buf->skb = skb; 1680 tx_buf->skb = skb;
1680 tx_buf->flags = 0; 1681 tx_buf->flags = 0;
1681 1682
1682 bd_prod = TX_BD(fp_tx->tx_bd_prod); 1683 bd_prod = TX_BD(txdata->tx_bd_prod);
1683 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; 1684 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
1684 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 1685 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1685 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 1686 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1686 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ 1687 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -1697,27 +1698,27 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
1697 /* turn on parsing and get a BD */ 1698 /* turn on parsing and get a BD */
1698 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 1699 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1699 1700
1700 pbd_e1x = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e1x; 1701 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
1701 pbd_e2 = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e2; 1702 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
1702 1703
1703 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 1704 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
1704 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 1705 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
1705 1706
1706 wmb(); 1707 wmb();
1707 1708
1708 fp_tx->tx_db.data.prod += 2; 1709 txdata->tx_db.data.prod += 2;
1709 barrier(); 1710 barrier();
1710 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw); 1711 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
1711 1712
1712 mmiowb(); 1713 mmiowb();
1713 barrier(); 1714 barrier();
1714 1715
1715 num_pkts++; 1716 num_pkts++;
1716 fp_tx->tx_bd_prod += 2; /* start + pbd */ 1717 txdata->tx_bd_prod += 2; /* start + pbd */
1717 1718
1718 udelay(100); 1719 udelay(100);
1719 1720
1720 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb); 1721 tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
1721 if (tx_idx != tx_start_idx + num_pkts) 1722 if (tx_idx != tx_start_idx + num_pkts)
1722 goto test_loopback_exit; 1723 goto test_loopback_exit;
1723 1724
@@ -1731,7 +1732,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
1731 * bnx2x_tx_int()), as both are taking netif_tx_lock(). 1732 * bnx2x_tx_int()), as both are taking netif_tx_lock().
1732 */ 1733 */
1733 local_bh_disable(); 1734 local_bh_disable();
1734 bnx2x_tx_int(fp_tx); 1735 bnx2x_tx_int(bp, txdata);
1735 local_bh_enable(); 1736 local_bh_enable();
1736 } 1737 }
1737 1738
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 0692d75756df..ce3b5662ca5a 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -2545,7 +2545,7 @@ struct host_func_stats {
2545 2545
2546#define BCM_5710_FW_MAJOR_VERSION 7 2546#define BCM_5710_FW_MAJOR_VERSION 7
2547#define BCM_5710_FW_MINOR_VERSION 0 2547#define BCM_5710_FW_MINOR_VERSION 0
2548#define BCM_5710_FW_REVISION_VERSION 20 2548#define BCM_5710_FW_REVISION_VERSION 23
2549#define BCM_5710_FW_ENGINEERING_VERSION 0 2549#define BCM_5710_FW_ENGINEERING_VERSION 0
2550#define BCM_5710_FW_COMPILE_FLAGS 1 2550#define BCM_5710_FW_COMPILE_FLAGS 1
2551 2551
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index df9f196dd6e8..82795a8349f6 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -128,11 +128,10 @@ enum {
128 MODE_MF_NIV = 0x00000800, 128 MODE_MF_NIV = 0x00000800,
129 MODE_E3_A0 = 0x00001000, 129 MODE_E3_A0 = 0x00001000,
130 MODE_E3_B0 = 0x00002000, 130 MODE_E3_B0 = 0x00002000,
131 MODE_COS_BC = 0x00004000, 131 MODE_COS3 = 0x00004000,
132 MODE_COS3 = 0x00008000, 132 MODE_COS6 = 0x00008000,
133 MODE_COS6 = 0x00010000, 133 MODE_LITTLE_ENDIAN = 0x00010000,
134 MODE_LITTLE_ENDIAN = 0x00020000, 134 MODE_BIG_ENDIAN = 0x00020000,
135 MODE_BIG_ENDIAN = 0x00040000,
136}; 135};
137 136
138/* Init Blocks */ 137/* Init Blocks */
@@ -179,7 +178,7 @@ enum {
179#define BNX2X_TOE_Q 3 178#define BNX2X_TOE_Q 3
180#define BNX2X_TOE_ACK_Q 6 179#define BNX2X_TOE_ACK_Q 6
181#define BNX2X_ISCSI_Q 9 180#define BNX2X_ISCSI_Q 9
182#define BNX2X_ISCSI_ACK_Q 8 181#define BNX2X_ISCSI_ACK_Q 11
183#define BNX2X_FCOE_Q 10 182#define BNX2X_FCOE_Q 10
184 183
185/* Vnics per mode */ 184/* Vnics per mode */
@@ -257,14 +256,16 @@ static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
257} 256}
258 257
259/* Configures the QM according to the specified per-traffic-type COSes */ 258/* Configures the QM according to the specified per-traffic-type COSes */
260static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, 259static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
261 struct priority_cos *traffic_cos) 260 struct priority_cos *traffic_cos)
262{ 261{
263 bnx2x_map_q_cos(bp, BNX2X_FCOE_Q, 262 bnx2x_map_q_cos(bp, BNX2X_FCOE_Q,
264 traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos); 263 traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
265 bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q, 264 bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q,
266 traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); 265 traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
267 if (INIT_MODE_FLAGS(bp) & MODE_COS_BC) { 266 bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
267 traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
268 if (mode != STATIC_COS) {
268 /* required only in backward compatible COS mode */ 269 /* required only in backward compatible COS mode */
269 bnx2x_map_q_cos(bp, BNX2X_ETH_Q, 270 bnx2x_map_q_cos(bp, BNX2X_ETH_Q,
270 traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); 271 traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
@@ -272,8 +273,6 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp,
272 traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); 273 traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
273 bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q, 274 bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q,
274 traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); 275 traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
275 bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
276 traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
277 } 276 }
278} 277}
279 278
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 53f4ec3d1d9e..8a374a77cdc9 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -767,6 +767,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
767 int func = BP_FUNC(bp); 767 int func = BP_FUNC(bp);
768#ifdef BNX2X_STOP_ON_ERROR 768#ifdef BNX2X_STOP_ON_ERROR
769 u16 start = 0, end = 0; 769 u16 start = 0, end = 0;
770 u8 cos;
770#endif 771#endif
771 772
772 bp->stats_state = STATS_STATE_DISABLED; 773 bp->stats_state = STATS_STATE_DISABLED;
@@ -822,8 +823,9 @@ void bnx2x_panic_dump(struct bnx2x *bp)
822 CHIP_IS_E1x(bp) ? 823 CHIP_IS_E1x(bp) ?
823 sb_data_e1x.index_data : 824 sb_data_e1x.index_data :
824 sb_data_e2.index_data; 825 sb_data_e2.index_data;
825 int data_size; 826 u8 data_size, cos;
826 u32 *sb_data_p; 827 u32 *sb_data_p;
828 struct bnx2x_fp_txdata txdata;
827 829
828 /* Rx */ 830 /* Rx */
829 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)" 831 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
@@ -838,11 +840,17 @@ void bnx2x_panic_dump(struct bnx2x *bp)
838 le16_to_cpu(fp->fp_hc_idx)); 840 le16_to_cpu(fp->fp_hc_idx));
839 841
840 /* Tx */ 842 /* Tx */
841 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)" 843 for_each_cos_in_tx_queue(fp, cos)
842 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)" 844 {
843 " *tx_cons_sb(0x%x)\n", 845 txdata = fp->txdata[cos];
844 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, 846 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
845 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); 847 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
848 " *tx_cons_sb(0x%x)\n",
849 i, txdata.tx_pkt_prod,
850 txdata.tx_pkt_cons, txdata.tx_bd_prod,
851 txdata.tx_bd_cons,
852 le16_to_cpu(*txdata.tx_cons_sb));
853 }
846 854
847 loop = CHIP_IS_E1x(bp) ? 855 loop = CHIP_IS_E1x(bp) ?
848 HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2; 856 HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
@@ -961,23 +969,31 @@ void bnx2x_panic_dump(struct bnx2x *bp)
961 /* Tx */ 969 /* Tx */
962 for_each_tx_queue(bp, i) { 970 for_each_tx_queue(bp, i) {
963 struct bnx2x_fastpath *fp = &bp->fp[i]; 971 struct bnx2x_fastpath *fp = &bp->fp[i];
972 for_each_cos_in_tx_queue(fp, cos) {
973 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
974
975 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
976 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
977 for (j = start; j != end; j = TX_BD(j + 1)) {
978 struct sw_tx_bd *sw_bd =
979 &txdata->tx_buf_ring[j];
980
981 BNX2X_ERR("fp%d: txdata %d, "
982 "packet[%x]=[%p,%x]\n",
983 i, cos, j, sw_bd->skb,
984 sw_bd->first_bd);
985 }
964 986
965 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); 987 start = TX_BD(txdata->tx_bd_cons - 10);
966 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245); 988 end = TX_BD(txdata->tx_bd_cons + 254);
967 for (j = start; j != end; j = TX_BD(j + 1)) { 989 for (j = start; j != end; j = TX_BD(j + 1)) {
968 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j]; 990 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
969
970 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
971 i, j, sw_bd->skb, sw_bd->first_bd);
972 }
973
974 start = TX_BD(fp->tx_bd_cons - 10);
975 end = TX_BD(fp->tx_bd_cons + 254);
976 for (j = start; j != end; j = TX_BD(j + 1)) {
977 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
978 991
979 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n", 992 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]="
980 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]); 993 "[%x:%x:%x:%x]\n",
994 i, cos, j, tx_bd[0], tx_bd[1],
995 tx_bd[2], tx_bd[3]);
996 }
981 } 997 }
982 } 998 }
983#endif 999#endif
@@ -1533,7 +1549,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
1533 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 1549 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1534} 1550}
1535 1551
1536static void bnx2x_int_disable(struct bnx2x *bp) 1552void bnx2x_int_disable(struct bnx2x *bp)
1537{ 1553{
1538 if (bp->common.int_block == INT_BLOCK_HC) 1554 if (bp->common.int_block == INT_BLOCK_HC)
1539 bnx2x_hc_int_disable(bp); 1555 bnx2x_hc_int_disable(bp);
@@ -1663,6 +1679,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1663 drv_cmd = BNX2X_Q_CMD_SETUP; 1679 drv_cmd = BNX2X_Q_CMD_SETUP;
1664 break; 1680 break;
1665 1681
1682 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1683 DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1684 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1685 break;
1686
1666 case (RAMROD_CMD_ID_ETH_HALT): 1687 case (RAMROD_CMD_ID_ETH_HALT):
1667 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid); 1688 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
1668 drv_cmd = BNX2X_Q_CMD_HALT; 1689 drv_cmd = BNX2X_Q_CMD_HALT;
@@ -1722,6 +1743,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1722 u16 status = bnx2x_ack_int(bp); 1743 u16 status = bnx2x_ack_int(bp);
1723 u16 mask; 1744 u16 mask;
1724 int i; 1745 int i;
1746 u8 cos;
1725 1747
1726 /* Return here if interrupt is shared and it's not for us */ 1748 /* Return here if interrupt is shared and it's not for us */
1727 if (unlikely(status == 0)) { 1749 if (unlikely(status == 0)) {
@@ -1738,11 +1760,12 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1738 for_each_eth_queue(bp, i) { 1760 for_each_eth_queue(bp, i) {
1739 struct bnx2x_fastpath *fp = &bp->fp[i]; 1761 struct bnx2x_fastpath *fp = &bp->fp[i];
1740 1762
1741 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE); 1763 mask = 0x2 << (fp->index + CNIC_PRESENT);
1742 if (status & mask) { 1764 if (status & mask) {
1743 /* Handle Rx or Tx according to SB id */ 1765 /* Handle Rx or Tx according to SB id */
1744 prefetch(fp->rx_cons_sb); 1766 prefetch(fp->rx_cons_sb);
1745 prefetch(fp->tx_cons_sb); 1767 for_each_cos_in_tx_queue(fp, cos)
1768 prefetch(fp->txdata[cos].tx_cons_sb);
1746 prefetch(&fp->sb_running_index[SM_RX_ID]); 1769 prefetch(&fp->sb_running_index[SM_RX_ID]);
1747 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 1770 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1748 status &= ~mask; 1771 status &= ~mask;
@@ -2632,15 +2655,43 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2632 } 2655 }
2633} 2656}
2634 2657
2635static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, 2658/**
2636 struct bnx2x_fastpath *fp, 2659 * bnx2x_get_tx_only_flags - Return common flags
2637 bool leading) 2660 *
2661 * @bp device handle
2662 * @fp queue handle
2663 * @zero_stats TRUE if statistics zeroing is needed
2664 *
2665 * Return the flags that are common for the Tx-only and not normal connections.
2666 */
2667static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2668 struct bnx2x_fastpath *fp,
2669 bool zero_stats)
2638{ 2670{
2639 unsigned long flags = 0; 2671 unsigned long flags = 0;
2640 2672
2641 /* PF driver will always initialize the Queue to an ACTIVE state */ 2673 /* PF driver will always initialize the Queue to an ACTIVE state */
2642 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); 2674 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
2643 2675
2676 /* tx only connections collect statistics (on the same index as the
2677 * parent connection). The statistics are zeroed when the parent
2678 * connection is initialized.
2679 */
2680 if (stat_counter_valid(bp, fp)) {
2681 __set_bit(BNX2X_Q_FLG_STATS, &flags);
2682 if (zero_stats)
2683 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
2684 }
2685
2686 return flags;
2687}
2688
2689static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2690 struct bnx2x_fastpath *fp,
2691 bool leading)
2692{
2693 unsigned long flags = 0;
2694
2644 /* calculate other queue flags */ 2695 /* calculate other queue flags */
2645 if (IS_MF_SD(bp)) 2696 if (IS_MF_SD(bp))
2646 __set_bit(BNX2X_Q_FLG_OV, &flags); 2697 __set_bit(BNX2X_Q_FLG_OV, &flags);
@@ -2651,11 +2702,6 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2651 if (!fp->disable_tpa) 2702 if (!fp->disable_tpa)
2652 __set_bit(BNX2X_Q_FLG_TPA, &flags); 2703 __set_bit(BNX2X_Q_FLG_TPA, &flags);
2653 2704
2654 if (stat_counter_valid(bp, fp)) {
2655 __set_bit(BNX2X_Q_FLG_STATS, &flags);
2656 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
2657 }
2658
2659 if (leading) { 2705 if (leading) {
2660 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); 2706 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
2661 __set_bit(BNX2X_Q_FLG_MCAST, &flags); 2707 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
@@ -2664,11 +2710,13 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2664 /* Always set HW VLAN stripping */ 2710 /* Always set HW VLAN stripping */
2665 __set_bit(BNX2X_Q_FLG_VLAN, &flags); 2711 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
2666 2712
2667 return flags; 2713
2714 return flags | bnx2x_get_common_flags(bp, fp, true);
2668} 2715}
2669 2716
2670static void bnx2x_pf_q_prep_general(struct bnx2x *bp, 2717static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
2671 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init) 2718 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
2719 u8 cos)
2672{ 2720{
2673 gen_init->stat_id = bnx2x_stats_id(fp); 2721 gen_init->stat_id = bnx2x_stats_id(fp);
2674 gen_init->spcl_id = fp->cl_id; 2722 gen_init->spcl_id = fp->cl_id;
@@ -2678,6 +2726,8 @@ static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
2678 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; 2726 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2679 else 2727 else
2680 gen_init->mtu = bp->dev->mtu; 2728 gen_init->mtu = bp->dev->mtu;
2729
2730 gen_init->cos = cos;
2681} 2731}
2682 2732
2683static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, 2733static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
@@ -2745,14 +2795,15 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2745 if (IS_FCOE_FP(fp)) 2795 if (IS_FCOE_FP(fp))
2746 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; 2796 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2747 else 2797 else
2748 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX; 2798 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
2749} 2799}
2750 2800
2751static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, 2801static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
2752 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init) 2802 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
2803 u8 cos)
2753{ 2804{
2754 txq_init->dscr_map = fp->tx_desc_mapping; 2805 txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
2755 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX; 2806 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
2756 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 2807 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2757 txq_init->fw_sb_id = fp->fw_sb_id; 2808 txq_init->fw_sb_id = fp->fw_sb_id;
2758 2809
@@ -2948,6 +2999,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2948static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) 2999static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
2949{ 3000{
2950 if ((cmd_type == NONE_CONNECTION_TYPE) || 3001 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3002 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2951 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || 3003 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2952 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || 3004 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2953 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || 3005 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
@@ -4270,12 +4322,13 @@ static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4270static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( 4322static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4271 struct bnx2x *bp, u32 cid) 4323 struct bnx2x *bp, u32 cid)
4272{ 4324{
4325 DP(BNX2X_MSG_SP, "retrieving fp from cid %d", cid);
4273#ifdef BCM_CNIC 4326#ifdef BCM_CNIC
4274 if (cid == BNX2X_FCOE_ETH_CID) 4327 if (cid == BNX2X_FCOE_ETH_CID)
4275 return &bnx2x_fcoe(bp, q_obj); 4328 return &bnx2x_fcoe(bp, q_obj);
4276 else 4329 else
4277#endif 4330#endif
4278 return &bnx2x_fp(bp, cid, q_obj); 4331 return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
4279} 4332}
4280 4333
4281static void bnx2x_eq_int(struct bnx2x *bp) 4334static void bnx2x_eq_int(struct bnx2x *bp)
@@ -4522,6 +4575,7 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
4522 4575
4523static void bnx2x_timer(unsigned long data) 4576static void bnx2x_timer(unsigned long data)
4524{ 4577{
4578 u8 cos;
4525 struct bnx2x *bp = (struct bnx2x *) data; 4579 struct bnx2x *bp = (struct bnx2x *) data;
4526 4580
4527 if (!netif_running(bp->dev)) 4581 if (!netif_running(bp->dev))
@@ -4530,7 +4584,8 @@ static void bnx2x_timer(unsigned long data)
4530 if (poll) { 4584 if (poll) {
4531 struct bnx2x_fastpath *fp = &bp->fp[0]; 4585 struct bnx2x_fastpath *fp = &bp->fp[0];
4532 4586
4533 bnx2x_tx_int(fp); 4587 for_each_cos_in_tx_queue(fp, cos)
4588 bnx2x_tx_int(bp, &fp->txdata[cos]);
4534 bnx2x_rx_int(fp, 1000); 4589 bnx2x_rx_int(fp, 1000);
4535 } 4590 }
4536 4591
@@ -4735,10 +4790,17 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4735static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id, 4790static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
4736 u16 tx_usec, u16 rx_usec) 4791 u16 tx_usec, u16 rx_usec)
4737{ 4792{
4738 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX, 4793 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
4739 false, rx_usec); 4794 false, rx_usec);
4740 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX, 4795 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
4741 false, tx_usec); 4796 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
4797 tx_usec);
4798 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
4799 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
4800 tx_usec);
4801 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
4802 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
4803 tx_usec);
4742} 4804}
4743 4805
4744static void bnx2x_init_def_sb(struct bnx2x *bp) 4806static void bnx2x_init_def_sb(struct bnx2x *bp)
@@ -5035,12 +5097,12 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5035 5097
5036static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) 5098static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
5037{ 5099{
5038 return fp->bp->igu_base_sb + fp->index + CNIC_CONTEXT_USE; 5100 return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
5039} 5101}
5040 5102
5041static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) 5103static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
5042{ 5104{
5043 return fp->bp->base_fw_ndsb + fp->index + CNIC_CONTEXT_USE; 5105 return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
5044} 5106}
5045 5107
5046static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) 5108static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
@@ -5051,10 +5113,12 @@ static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
5051 return bnx2x_fp_igu_sb_id(fp); 5113 return bnx2x_fp_igu_sb_id(fp);
5052} 5114}
5053 5115
5054static void bnx2x_init_fp(struct bnx2x *bp, int fp_idx) 5116static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
5055{ 5117{
5056 struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; 5118 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
5119 u8 cos;
5057 unsigned long q_type = 0; 5120 unsigned long q_type = 0;
5121 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
5058 5122
5059 fp->cid = fp_idx; 5123 fp->cid = fp_idx;
5060 fp->cl_id = bnx2x_fp_cl_id(fp); 5124 fp->cl_id = bnx2x_fp_cl_id(fp);
@@ -5067,14 +5131,25 @@ static void bnx2x_init_fp(struct bnx2x *bp, int fp_idx)
5067 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); 5131 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
5068 /* Setup SB indicies */ 5132 /* Setup SB indicies */
5069 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; 5133 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5070 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5071 5134
5072 /* Configure Queue State object */ 5135 /* Configure Queue State object */
5073 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 5136 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
5074 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 5137 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
5075 bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid, BP_FUNC(bp), 5138
5076 bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata), 5139 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
5077 q_type); 5140
5141 /* init tx data */
5142 for_each_cos_in_tx_queue(fp, cos) {
5143 bnx2x_init_txdata(bp, &fp->txdata[cos],
5144 CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
5145 FP_COS_TO_TXQ(fp, cos),
5146 BNX2X_TX_SB_INDEX_BASE + cos);
5147 cids[cos] = fp->txdata[cos].cid;
5148 }
5149
5150 bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
5151 BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
5152 bnx2x_sp_mapping(bp, q_rdata), q_type);
5078 5153
5079 /** 5154 /**
5080 * Configure classification DBs: Always enable Tx switching 5155 * Configure classification DBs: Always enable Tx switching
@@ -5096,7 +5171,7 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5096 int i; 5171 int i;
5097 5172
5098 for_each_eth_queue(bp, i) 5173 for_each_eth_queue(bp, i)
5099 bnx2x_init_fp(bp, i); 5174 bnx2x_init_eth_fp(bp, i);
5100#ifdef BCM_CNIC 5175#ifdef BCM_CNIC
5101 if (!NO_FCOE(bp)) 5176 if (!NO_FCOE(bp))
5102 bnx2x_init_fcoe_fp(bp); 5177 bnx2x_init_fcoe_fp(bp);
@@ -6718,7 +6793,7 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
6718 if (bnx2x_alloc_fw_stats_mem(bp)) 6793 if (bnx2x_alloc_fw_stats_mem(bp))
6719 goto alloc_mem_err; 6794 goto alloc_mem_err;
6720 6795
6721 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count; 6796 bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
6722 6797
6723 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, 6798 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6724 bp->context.size); 6799 bp->context.size);
@@ -6837,7 +6912,7 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6837 bnx2x_enable_msi(bp); 6912 bnx2x_enable_msi(bp);
6838 /* falling through... */ 6913 /* falling through... */
6839 case INT_MODE_INTx: 6914 case INT_MODE_INTx:
6840 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE; 6915 bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
6841 DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); 6916 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6842 break; 6917 break;
6843 default: 6918 default:
@@ -6859,8 +6934,8 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6859 "enable MSI-X (%d), " 6934 "enable MSI-X (%d), "
6860 "set number of queues to %d\n", 6935 "set number of queues to %d\n",
6861 bp->num_queues, 6936 bp->num_queues,
6862 1 + NONE_ETH_CONTEXT_USE); 6937 1 + NON_ETH_CONTEXT_USE);
6863 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE; 6938 bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
6864 6939
6865 /* Try to enable MSI */ 6940 /* Try to enable MSI */
6866 if (!(bp->flags & DISABLE_MSI_FLAG)) 6941 if (!(bp->flags & DISABLE_MSI_FLAG))
@@ -6988,6 +7063,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
6988static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, 7063static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
6989 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) 7064 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
6990{ 7065{
7066
7067 u8 cos;
6991 /* FCoE Queue uses Default SB, thus has no HC capabilities */ 7068 /* FCoE Queue uses Default SB, thus has no HC capabilities */
6992 if (!IS_FCOE_FP(fp)) { 7069 if (!IS_FCOE_FP(fp)) {
6993 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); 7070 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
@@ -7013,13 +7090,56 @@ static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
7013 * CQ index among the SB indices: FCoE clients uses the default 7090 * CQ index among the SB indices: FCoE clients uses the default
7014 * SB, therefore it's different. 7091 * SB, therefore it's different.
7015 */ 7092 */
7016 init_params->rx.sb_cq_index = U_SB_ETH_RX_CQ_INDEX; 7093 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
7017 init_params->tx.sb_cq_index = C_SB_ETH_TX_CQ_INDEX; 7094 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
7018 } 7095 }
7019 7096
7020 init_params->cxt = &bp->context.vcxt[fp->cid].eth; 7097 /* set maximum number of COSs supported by this queue */
7098 init_params->max_cos = fp->max_cos;
7099
7100 DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d",
7101 fp->index, init_params->max_cos);
7102
7103 /* set the context pointers queue object */
7104 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
7105 init_params->cxts[cos] =
7106 &bp->context.vcxt[fp->txdata[cos].cid].eth;
7021} 7107}
7022 7108
7109int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
7110 struct bnx2x_queue_state_params *q_params,
7111 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
7112 int tx_index, bool leading)
7113{
7114 memset(tx_only_params, 0, sizeof(*tx_only_params));
7115
7116 /* Set the command */
7117 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
7118
7119 /* Set tx-only QUEUE flags: don't zero statistics */
7120 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
7121
7122 /* choose the index of the cid to send the slow path on */
7123 tx_only_params->cid_index = tx_index;
7124
7125 /* Set general TX_ONLY_SETUP parameters */
7126 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
7127
7128 /* Set Tx TX_ONLY_SETUP parameters */
7129 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
7130
7131 DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:"
7132 "cos %d, primary cid %d, cid %d, "
7133 "client id %d, sp-client id %d, flags %lx",
7134 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
7135 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
7136 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
7137
7138 /* send the ramrod */
7139 return bnx2x_queue_state_change(bp, q_params);
7140}
7141
7142
7023/** 7143/**
7024 * bnx2x_setup_queue - setup queue 7144 * bnx2x_setup_queue - setup queue
7025 * 7145 *
@@ -7037,7 +7157,12 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
7037 struct bnx2x_queue_state_params q_params = {0}; 7157 struct bnx2x_queue_state_params q_params = {0};
7038 struct bnx2x_queue_setup_params *setup_params = 7158 struct bnx2x_queue_setup_params *setup_params =
7039 &q_params.params.setup; 7159 &q_params.params.setup;
7160 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
7161 &q_params.params.tx_only;
7040 int rc; 7162 int rc;
7163 u8 tx_index;
7164
7165 DP(BNX2X_MSG_SP, "setting up queue %d", fp->index);
7041 7166
7042 /* reset IGU state skip FCoE L2 queue */ 7167 /* reset IGU state skip FCoE L2 queue */
7043 if (!IS_FCOE_FP(fp)) 7168 if (!IS_FCOE_FP(fp))
@@ -7057,10 +7182,13 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
7057 /* Change the state to INIT */ 7182 /* Change the state to INIT */
7058 rc = bnx2x_queue_state_change(bp, &q_params); 7183 rc = bnx2x_queue_state_change(bp, &q_params);
7059 if (rc) { 7184 if (rc) {
7060 BNX2X_ERR("Queue INIT failed\n"); 7185 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
7061 return rc; 7186 return rc;
7062 } 7187 }
7063 7188
7189 DP(BNX2X_MSG_SP, "init complete");
7190
7191
7064 /* Now move the Queue to the SETUP state... */ 7192 /* Now move the Queue to the SETUP state... */
7065 memset(setup_params, 0, sizeof(*setup_params)); 7193 memset(setup_params, 0, sizeof(*setup_params));
7066 7194
@@ -7068,20 +7196,39 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
7068 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); 7196 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
7069 7197
7070 /* Set general SETUP parameters */ 7198 /* Set general SETUP parameters */
7071 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params); 7199 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
7200 FIRST_TX_COS_INDEX);
7072 7201
7073 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause, 7202 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
7074 &setup_params->rxq_params); 7203 &setup_params->rxq_params);
7075 7204
7076 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params); 7205 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
7206 FIRST_TX_COS_INDEX);
7077 7207
7078 /* Set the command */ 7208 /* Set the command */
7079 q_params.cmd = BNX2X_Q_CMD_SETUP; 7209 q_params.cmd = BNX2X_Q_CMD_SETUP;
7080 7210
7081 /* Change the state to SETUP */ 7211 /* Change the state to SETUP */
7082 rc = bnx2x_queue_state_change(bp, &q_params); 7212 rc = bnx2x_queue_state_change(bp, &q_params);
7083 if (rc) 7213 if (rc) {
7084 BNX2X_ERR("Queue SETUP failed\n"); 7214 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
7215 return rc;
7216 }
7217
7218 /* loop through the relevant tx-only indices */
7219 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
7220 tx_index < fp->max_cos;
7221 tx_index++) {
7222
7223 /* prepare and send tx-only ramrod*/
7224 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
7225 tx_only_params, tx_index, leading);
7226 if (rc) {
7227 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
7228 fp->index, tx_index);
7229 return rc;
7230 }
7231 }
7085 7232
7086 return rc; 7233 return rc;
7087} 7234}
@@ -7089,27 +7236,67 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
7089static int bnx2x_stop_queue(struct bnx2x *bp, int index) 7236static int bnx2x_stop_queue(struct bnx2x *bp, int index)
7090{ 7237{
7091 struct bnx2x_fastpath *fp = &bp->fp[index]; 7238 struct bnx2x_fastpath *fp = &bp->fp[index];
7239 struct bnx2x_fp_txdata *txdata;
7092 struct bnx2x_queue_state_params q_params = {0}; 7240 struct bnx2x_queue_state_params q_params = {0};
7093 int rc; 7241 int rc, tx_index;
7242
7243 DP(BNX2X_MSG_SP, "stopping queue %d cid %d", index, fp->cid);
7094 7244
7095 q_params.q_obj = &fp->q_obj; 7245 q_params.q_obj = &fp->q_obj;
7096 /* We want to wait for completion in this context */ 7246 /* We want to wait for completion in this context */
7097 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 7247 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
7098 7248
7099 /* halt the connection */ 7249
7250 /* close tx-only connections */
7251 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
7252 tx_index < fp->max_cos;
7253 tx_index++){
7254
7255 /* ascertain this is a normal queue*/
7256 txdata = &fp->txdata[tx_index];
7257
7258 DP(BNX2X_MSG_SP, "stopping tx-only queue %d",
7259 txdata->txq_index);
7260
7261 /* send halt terminate on tx-only connection */
7262 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
7263 memset(&q_params.params.terminate, 0,
7264 sizeof(q_params.params.terminate));
7265 q_params.params.terminate.cid_index = tx_index;
7266
7267 rc = bnx2x_queue_state_change(bp, &q_params);
7268 if (rc)
7269 return rc;
7270
7271 /* send halt terminate on tx-only connection */
7272 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
7273 memset(&q_params.params.cfc_del, 0,
7274 sizeof(q_params.params.cfc_del));
7275 q_params.params.cfc_del.cid_index = tx_index;
7276 rc = bnx2x_queue_state_change(bp, &q_params);
7277 if (rc)
7278 return rc;
7279 }
7280 /* Stop the primary connection: */
7281 /* ...halt the connection */
7100 q_params.cmd = BNX2X_Q_CMD_HALT; 7282 q_params.cmd = BNX2X_Q_CMD_HALT;
7101 rc = bnx2x_queue_state_change(bp, &q_params); 7283 rc = bnx2x_queue_state_change(bp, &q_params);
7102 if (rc) 7284 if (rc)
7103 return rc; 7285 return rc;
7104 7286
7105 /* terminate the connection */ 7287 /* ...terminate the connection */
7106 q_params.cmd = BNX2X_Q_CMD_TERMINATE; 7288 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
7289 memset(&q_params.params.terminate, 0,
7290 sizeof(q_params.params.terminate));
7291 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
7107 rc = bnx2x_queue_state_change(bp, &q_params); 7292 rc = bnx2x_queue_state_change(bp, &q_params);
7108 if (rc) 7293 if (rc)
7109 return rc; 7294 return rc;
7110 7295 /* ...delete cfc entry */
7111 /* delete cfc entry */
7112 q_params.cmd = BNX2X_Q_CMD_CFC_DEL; 7296 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
7297 memset(&q_params.params.cfc_del, 0,
7298 sizeof(q_params.params.cfc_del));
7299 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
7113 return bnx2x_queue_state_change(bp, &q_params); 7300 return bnx2x_queue_state_change(bp, &q_params);
7114} 7301}
7115 7302
@@ -7130,8 +7317,8 @@ static void bnx2x_reset_func(struct bnx2x *bp)
7130 for_each_eth_queue(bp, i) { 7317 for_each_eth_queue(bp, i) {
7131 struct bnx2x_fastpath *fp = &bp->fp[i]; 7318 struct bnx2x_fastpath *fp = &bp->fp[i];
7132 REG_WR8(bp, BAR_CSTRORM_INTMEM + 7319 REG_WR8(bp, BAR_CSTRORM_INTMEM +
7133 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 7320 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
7134 SB_DISABLED); 7321 SB_DISABLED);
7135 } 7322 }
7136 7323
7137#ifdef BCM_CNIC 7324#ifdef BCM_CNIC
@@ -7142,8 +7329,8 @@ static void bnx2x_reset_func(struct bnx2x *bp)
7142#endif 7329#endif
7143 /* SP SB */ 7330 /* SP SB */
7144 REG_WR8(bp, BAR_CSTRORM_INTMEM + 7331 REG_WR8(bp, BAR_CSTRORM_INTMEM +
7145 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 7332 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
7146 SB_DISABLED); 7333 SB_DISABLED);
7147 7334
7148 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) 7335 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
7149 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 7336 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
@@ -7352,7 +7539,8 @@ void bnx2x_send_unload_done(struct bnx2x *bp)
7352void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) 7539void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7353{ 7540{
7354 int port = BP_PORT(bp); 7541 int port = BP_PORT(bp);
7355 int i, rc; 7542 int i, rc = 0;
7543 u8 cos;
7356 struct bnx2x_mcast_ramrod_params rparam = {0}; 7544 struct bnx2x_mcast_ramrod_params rparam = {0};
7357 u32 reset_code; 7545 u32 reset_code;
7358 7546
@@ -7360,7 +7548,8 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7360 for_each_tx_queue(bp, i) { 7548 for_each_tx_queue(bp, i) {
7361 struct bnx2x_fastpath *fp = &bp->fp[i]; 7549 struct bnx2x_fastpath *fp = &bp->fp[i];
7362 7550
7363 rc = bnx2x_clean_tx_queue(bp, fp); 7551 for_each_cos_in_tx_queue(fp, cos)
7552 rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
7364#ifdef BNX2X_STOP_ON_ERROR 7553#ifdef BNX2X_STOP_ON_ERROR
7365 if (rc) 7554 if (rc)
7366 return; 7555 return;
@@ -7888,7 +8077,7 @@ static inline void bnx2x_recovery_failed(struct bnx2x *bp)
7888 8077
7889/* 8078/*
7890 * Assumption: runs under rtnl lock. This together with the fact 8079 * Assumption: runs under rtnl lock. This together with the fact
7891 * that it's called only from bnx2x_reset_task() ensure that it 8080 * that it's called only from bnx2x_sp_rtnl() ensure that it
7892 * will never be called when netif_running(bp->dev) is false. 8081 * will never be called when netif_running(bp->dev) is false.
7893 */ 8082 */
7894static void bnx2x_parity_recover(struct bnx2x *bp) 8083static void bnx2x_parity_recover(struct bnx2x *bp)
@@ -8045,6 +8234,9 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
8045 if (!netif_running(bp->dev)) 8234 if (!netif_running(bp->dev))
8046 goto sp_rtnl_exit; 8235 goto sp_rtnl_exit;
8047 8236
8237 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
8238 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
8239
8048 /* if stop on error is defined no recovery flows should be executed */ 8240 /* if stop on error is defined no recovery flows should be executed */
8049#ifdef BNX2X_STOP_ON_ERROR 8241#ifdef BNX2X_STOP_ON_ERROR
8050 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined " 8242 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined "
@@ -8387,14 +8579,11 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
8387 int vn = BP_E1HVN(bp); 8579 int vn = BP_E1HVN(bp);
8388 int igu_sb_id; 8580 int igu_sb_id;
8389 u32 val; 8581 u32 val;
8390 u8 fid; 8582 u8 fid, igu_sb_cnt = 0;
8391 8583
8392 bp->igu_base_sb = 0xff; 8584 bp->igu_base_sb = 0xff;
8393 bp->igu_sb_cnt = 0;
8394 if (CHIP_INT_MODE_IS_BC(bp)) { 8585 if (CHIP_INT_MODE_IS_BC(bp)) {
8395 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, 8586 igu_sb_cnt = bp->igu_sb_cnt;
8396 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8397
8398 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * 8587 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
8399 FP_SB_MAX_E1x; 8588 FP_SB_MAX_E1x;
8400 8589
@@ -8420,19 +8609,21 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
8420 else { 8609 else {
8421 if (bp->igu_base_sb == 0xff) 8610 if (bp->igu_base_sb == 0xff)
8422 bp->igu_base_sb = igu_sb_id; 8611 bp->igu_base_sb = igu_sb_id;
8423 bp->igu_sb_cnt++; 8612 igu_sb_cnt++;
8424 } 8613 }
8425 } 8614 }
8426 } 8615 }
8427 8616
8428 /* It's expected that number of CAM entries for this 8617#ifdef CONFIG_PCI_MSI
8429 * functions is equal to the MSI-X table size (which was a 8618 /*
8430 * used during bp->l2_cid_count value calculation. 8619 * It's expected that number of CAM entries for this functions is equal
8431 * We want a harsh warning if these values are different! 8620 * to the number evaluated based on the MSI-X table size. We want a
8621 * harsh warning if these values are different!
8432 */ 8622 */
8433 WARN_ON(bp->igu_sb_cnt != NUM_IGU_SB_REQUIRED(bp->l2_cid_count)); 8623 WARN_ON(bp->igu_sb_cnt != igu_sb_cnt);
8624#endif
8434 8625
8435 if (bp->igu_sb_cnt == 0) 8626 if (igu_sb_cnt == 0)
8436 BNX2X_ERR("CAM configuration error\n"); 8627 BNX2X_ERR("CAM configuration error\n");
8437} 8628}
8438 8629
@@ -8961,13 +9152,14 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8961 9152
8962 bnx2x_get_common_hwinfo(bp); 9153 bnx2x_get_common_hwinfo(bp);
8963 9154
9155 /*
9156 * initialize IGU parameters
9157 */
8964 if (CHIP_IS_E1x(bp)) { 9158 if (CHIP_IS_E1x(bp)) {
8965 bp->common.int_block = INT_BLOCK_HC; 9159 bp->common.int_block = INT_BLOCK_HC;
8966 9160
8967 bp->igu_dsb_id = DEF_SB_IGU_ID; 9161 bp->igu_dsb_id = DEF_SB_IGU_ID;
8968 bp->igu_base_sb = 0; 9162 bp->igu_base_sb = 0;
8969 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8970 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8971 } else { 9163 } else {
8972 bp->common.int_block = INT_BLOCK_IGU; 9164 bp->common.int_block = INT_BLOCK_IGU;
8973 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 9165 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
@@ -9260,10 +9452,8 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
9260 SET_FLAGS(flags, MODE_E3); 9452 SET_FLAGS(flags, MODE_E3);
9261 if (CHIP_REV(bp) == CHIP_REV_Ax) 9453 if (CHIP_REV(bp) == CHIP_REV_Ax)
9262 SET_FLAGS(flags, MODE_E3_A0); 9454 SET_FLAGS(flags, MODE_E3_A0);
9263 else {/*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ 9455 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
9264 SET_FLAGS(flags, MODE_E3_B0); 9456 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
9265 SET_FLAGS(flags, MODE_COS_BC);
9266 }
9267 } 9457 }
9268 9458
9269 if (IS_MF(bp)) { 9459 if (IS_MF(bp)) {
@@ -9371,6 +9561,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9371 bp->cnic_base_cl_id = FP_SB_MAX_E2; 9561 bp->cnic_base_cl_id = FP_SB_MAX_E2;
9372#endif 9562#endif
9373 9563
9564 /* multiple tx priority */
9565 if (CHIP_IS_E1x(bp))
9566 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
9567 if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
9568 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
9569 if (CHIP_IS_E3B0(bp))
9570 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
9571
9374 return rc; 9572 return rc;
9375} 9573}
9376 9574
@@ -9696,6 +9894,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
9696#ifdef CONFIG_NET_POLL_CONTROLLER 9894#ifdef CONFIG_NET_POLL_CONTROLLER
9697 .ndo_poll_controller = poll_bnx2x, 9895 .ndo_poll_controller = poll_bnx2x,
9698#endif 9896#endif
9897 .ndo_setup_tc = bnx2x_setup_tc,
9898
9699}; 9899};
9700 9900
9701static inline int bnx2x_set_coherency_mask(struct bnx2x *bp) 9901static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -9797,16 +9997,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9797 goto err_out_release; 9997 goto err_out_release;
9798 } 9998 }
9799 9999
9800 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9801 min_t(u64, BNX2X_DB_SIZE(bp),
9802 pci_resource_len(pdev, 2)));
9803 if (!bp->doorbells) {
9804 dev_err(&bp->pdev->dev,
9805 "Cannot map doorbell space, aborting\n");
9806 rc = -ENOMEM;
9807 goto err_out_unmap;
9808 }
9809
9810 bnx2x_set_power_state(bp, PCI_D0); 10000 bnx2x_set_power_state(bp, PCI_D0);
9811 10001
9812 /* clean indirect addresses */ 10002 /* clean indirect addresses */
@@ -9859,16 +10049,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9859 10049
9860 return 0; 10050 return 0;
9861 10051
9862err_out_unmap:
9863 if (bp->regview) {
9864 iounmap(bp->regview);
9865 bp->regview = NULL;
9866 }
9867 if (bp->doorbells) {
9868 iounmap(bp->doorbells);
9869 bp->doorbells = NULL;
9870 }
9871
9872err_out_release: 10052err_out_release:
9873 if (atomic_read(&pdev->enable_cnt) == 1) 10053 if (atomic_read(&pdev->enable_cnt) == 1)
9874 pci_release_regions(pdev); 10054 pci_release_regions(pdev);
@@ -10143,9 +10323,9 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
10143} 10323}
10144 10324
10145/* must be called after sriov-enable */ 10325/* must be called after sriov-enable */
10146static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count) 10326static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
10147{ 10327{
10148 int cid_count = L2_FP_COUNT(l2_cid_count); 10328 int cid_count = BNX2X_L2_CID_COUNT(bp);
10149 10329
10150#ifdef BCM_CNIC 10330#ifdef BCM_CNIC
10151 cid_count += CNIC_CID_MAX; 10331 cid_count += CNIC_CID_MAX;
@@ -10154,22 +10334,33 @@ static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
10154} 10334}
10155 10335
10156/** 10336/**
10157 * bnx2x_pci_msix_table_size - get the size of the MSI-X table. 10337 * bnx2x_get_num_none_def_sbs - return the number of none default SBs
10158 * 10338 *
10159 * @dev: pci device 10339 * @dev: pci device
10160 * 10340 *
10161 */ 10341 */
10162static inline int bnx2x_pci_msix_table_size(struct pci_dev *pdev) 10342static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
10163{ 10343{
10164 int pos; 10344 int pos;
10165 u16 control; 10345 u16 control;
10166 10346
10167 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 10347 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
10348
10349 /*
10350 * If MSI-X is not supported - return number of SBs needed to support
10351 * one fast path queue: one FP queue + SB for CNIC
10352 */
10168 if (!pos) 10353 if (!pos)
10169 return 0; 10354 return 1 + CNIC_PRESENT;
10170 10355
10356 /*
10357 * The value in the PCI configuration space is the index of the last
10358 * entry, namely one less than the actual size of the table, which is
10359 * exactly what we want to return from this function: number of all SBs
10360 * without the default SB.
10361 */
10171 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control); 10362 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
10172 return (control & PCI_MSIX_FLAGS_QSIZE) + 1; 10363 return control & PCI_MSIX_FLAGS_QSIZE;
10173} 10364}
10174 10365
10175static int __devinit bnx2x_init_one(struct pci_dev *pdev, 10366static int __devinit bnx2x_init_one(struct pci_dev *pdev,
@@ -10178,34 +10369,38 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10178 struct net_device *dev = NULL; 10369 struct net_device *dev = NULL;
10179 struct bnx2x *bp; 10370 struct bnx2x *bp;
10180 int pcie_width, pcie_speed; 10371 int pcie_width, pcie_speed;
10181 int rc, cid_count; 10372 int rc, max_non_def_sbs;
10373 int rx_count, tx_count, rss_count;
10374 /*
10375 * An estimated maximum supported CoS number according to the chip
10376 * version.
10377 * We will try to roughly estimate the maximum number of CoSes this chip
10378 * may support in order to minimize the memory allocated for Tx
10379 * netdev_queue's. This number will be accurately calculated during the
10380 * initialization of bp->max_cos based on the chip versions AND chip
10381 * revision in the bnx2x_init_bp().
10382 */
10383 u8 max_cos_est = 0;
10182 10384
10183 switch (ent->driver_data) { 10385 switch (ent->driver_data) {
10184 case BCM57710: 10386 case BCM57710:
10185 case BCM57711: 10387 case BCM57711:
10186 case BCM57711E: 10388 case BCM57711E:
10389 max_cos_est = BNX2X_MULTI_TX_COS_E1X;
10390 break;
10391
10187 case BCM57712: 10392 case BCM57712:
10188 case BCM57712_MF: 10393 case BCM57712_MF:
10394 max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
10395 break;
10396
10189 case BCM57800: 10397 case BCM57800:
10190 case BCM57800_MF: 10398 case BCM57800_MF:
10191 case BCM57810: 10399 case BCM57810:
10192 case BCM57810_MF: 10400 case BCM57810_MF:
10193 case BCM57840: 10401 case BCM57840:
10194 case BCM57840_MF: 10402 case BCM57840_MF:
10195 /* The size requested for the MSI-X table corresponds to the 10403 max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
10196 * actual amount of avaliable IGU/HC status blocks. It includes
10197 * the default SB vector but we want cid_count to contain the
10198 * amount of only non-default SBs, that's what '-1' stands for.
10199 */
10200 cid_count = bnx2x_pci_msix_table_size(pdev) - 1;
10201
10202 /* do not allow initial cid_count grow above 16
10203 * since Special CIDs starts from this number
10204 * use old FP_SB_MAX_E1x define for this matter
10205 */
10206 cid_count = min_t(int, FP_SB_MAX_E1x, cid_count);
10207
10208 WARN_ON(!cid_count);
10209 break; 10404 break;
10210 10405
10211 default: 10406 default:
@@ -10214,26 +10409,44 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10214 return -ENODEV; 10409 return -ENODEV;
10215 } 10410 }
10216 10411
10217 cid_count += FCOE_CONTEXT_USE; 10412 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
10413
10414 /* !!! FIXME !!!
10415 * Do not allow the maximum SB count to grow above 16
10416 * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48.
10417 * We will use the FP_SB_MAX_E1x macro for this matter.
10418 */
10419 max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
10420
10421 WARN_ON(!max_non_def_sbs);
10422
10423 /* Maximum number of RSS queues: one IGU SB goes to CNIC */
10424 rss_count = max_non_def_sbs - CNIC_PRESENT;
10425
10426 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
10427 rx_count = rss_count + FCOE_PRESENT;
10428
10429 /*
10430 * Maximum number of netdev Tx queues:
10431 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
10432 */
10433 tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
10218 10434
10219 /* dev zeroed in init_etherdev */ 10435 /* dev zeroed in init_etherdev */
10220 dev = alloc_etherdev_mq(sizeof(*bp), cid_count); 10436 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
10221 if (!dev) { 10437 if (!dev) {
10222 dev_err(&pdev->dev, "Cannot allocate net device\n"); 10438 dev_err(&pdev->dev, "Cannot allocate net device\n");
10223 return -ENOMEM; 10439 return -ENOMEM;
10224 } 10440 }
10225 10441
10226 /* We don't need a Tx queue for a CNIC and an OOO Rx-only ring,
10227 * so update a cid_count after a netdev allocation.
10228 */
10229 cid_count += CNIC_CONTEXT_USE;
10230
10231 bp = netdev_priv(dev); 10442 bp = netdev_priv(dev);
10232 bp->msg_enable = debug;
10233 10443
10234 pci_set_drvdata(pdev, dev); 10444 DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n",
10445 tx_count, rx_count);
10235 10446
10236 bp->l2_cid_count = cid_count; 10447 bp->igu_sb_cnt = max_non_def_sbs;
10448 bp->msg_enable = debug;
10449 pci_set_drvdata(pdev, dev);
10237 10450
10238 rc = bnx2x_init_dev(pdev, dev, ent->driver_data); 10451 rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
10239 if (rc < 0) { 10452 if (rc < 0) {
@@ -10241,14 +10454,28 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10241 return rc; 10454 return rc;
10242 } 10455 }
10243 10456
10244 BNX2X_DEV_INFO("cid_count=%d\n", cid_count); 10457 DP(NETIF_MSG_DRV, "max_non_def_sbs %d", max_non_def_sbs);
10245 10458
10246 rc = bnx2x_init_bp(bp); 10459 rc = bnx2x_init_bp(bp);
10247 if (rc) 10460 if (rc)
10248 goto init_one_exit; 10461 goto init_one_exit;
10249 10462
10463 /*
10464 * Map doorbels here as we need the real value of bp->max_cos which
10465 * is initialized in bnx2x_init_bp().
10466 */
10467 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10468 min_t(u64, BNX2X_DB_SIZE(bp),
10469 pci_resource_len(pdev, 2)));
10470 if (!bp->doorbells) {
10471 dev_err(&bp->pdev->dev,
10472 "Cannot map doorbell space, aborting\n");
10473 rc = -ENOMEM;
10474 goto init_one_exit;
10475 }
10476
10250 /* calc qm_cid_count */ 10477 /* calc qm_cid_count */
10251 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count); 10478 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
10252 10479
10253#ifdef BCM_CNIC 10480#ifdef BCM_CNIC
10254 /* disable FCOE L2 queue for E1x*/ 10481 /* disable FCOE L2 queue for E1x*/
diff --git a/drivers/net/bnx2x/bnx2x_sp.c b/drivers/net/bnx2x/bnx2x_sp.c
index f6322a15ed4d..358c33997536 100644
--- a/drivers/net/bnx2x/bnx2x_sp.c
+++ b/drivers/net/bnx2x/bnx2x_sp.c
@@ -4195,15 +4195,29 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4195 4195
4196 if (!test_and_clear_bit(cmd, &cur_pending)) { 4196 if (!test_and_clear_bit(cmd, &cur_pending)) {
4197 BNX2X_ERR("Bad MC reply %d for queue %d in state %d " 4197 BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
4198 "pending 0x%lx, next_state %d\n", cmd, o->cid, 4198 "pending 0x%lx, next_state %d\n", cmd,
4199 o->cids[BNX2X_PRIMARY_CID_INDEX],
4199 o->state, cur_pending, o->next_state); 4200 o->state, cur_pending, o->next_state);
4200 return -EINVAL; 4201 return -EINVAL;
4201 } 4202 }
4202 4203
4204 if (o->next_tx_only >= o->max_cos)
4205 /* >= becuase tx only must always be smaller than cos since the
4206 * primary connection suports COS 0
4207 */
4208 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4209 o->next_tx_only, o->max_cos);
4210
4203 DP(BNX2X_MSG_SP, "Completing command %d for queue %d, " 4211 DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
4204 "setting state to %d\n", cmd, o->cid, o->next_state); 4212 "setting state to %d\n", cmd,
4213 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4214
4215 if (o->next_tx_only) /* print num tx-only if any exist */
4216 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d",
4217 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4205 4218
4206 o->state = o->next_state; 4219 o->state = o->next_state;
4220 o->num_tx_only = o->next_tx_only;
4207 o->next_state = BNX2X_Q_STATE_MAX; 4221 o->next_state = BNX2X_Q_STATE_MAX;
4208 4222
4209 /* It's important that o->state and o->next_state are 4223 /* It's important that o->state and o->next_state are
@@ -4230,135 +4244,193 @@ static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4230 CLIENT_INIT_RX_DATA_TPA_EN_IPV6; 4244 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4231} 4245}
4232 4246
4233static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp, 4247static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4234 struct bnx2x_queue_state_params *cmd_params, 4248 struct bnx2x_queue_sp_obj *o,
4235 struct client_init_ramrod_data *data) 4249 struct bnx2x_general_setup_params *params,
4236{ 4250 struct client_init_general_data *gen_data,
4237 struct bnx2x_queue_sp_obj *o = cmd_params->q_obj; 4251 unsigned long *flags)
4238 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup; 4252{
4239 4253 gen_data->client_id = o->cl_id;
4240 4254
4241 /* general */ 4255 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4242 data->general.client_id = o->cl_id; 4256 gen_data->statistics_counter_id =
4243 4257 params->stat_id;
4244 if (test_bit(BNX2X_Q_FLG_STATS, &params->flags)) { 4258 gen_data->statistics_en_flg = 1;
4245 data->general.statistics_counter_id = 4259 gen_data->statistics_zero_flg =
4246 params->gen_params.stat_id; 4260 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4247 data->general.statistics_en_flg = 1;
4248 data->general.statistics_zero_flg =
4249 test_bit(BNX2X_Q_FLG_ZERO_STATS, &params->flags);
4250 } else 4261 } else
4251 data->general.statistics_counter_id = 4262 gen_data->statistics_counter_id =
4252 DISABLE_STATISTIC_COUNTER_ID_VALUE; 4263 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4253 4264
4254 data->general.is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, &params->flags); 4265 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4255 data->general.activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, 4266 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4256 &params->flags); 4267 gen_data->sp_client_id = params->spcl_id;
4257 data->general.sp_client_id = params->gen_params.spcl_id; 4268 gen_data->mtu = cpu_to_le16(params->mtu);
4258 data->general.mtu = cpu_to_le16(params->gen_params.mtu); 4269 gen_data->func_id = o->func_id;
4259 data->general.func_id = o->func_id;
4260 4270
4261 4271
4262 data->general.cos = params->txq_params.cos; 4272 gen_data->cos = params->cos;
4263 4273
4264 data->general.traffic_type = 4274 gen_data->traffic_type =
4265 test_bit(BNX2X_Q_FLG_FCOE, &params->flags) ? 4275 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4266 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; 4276 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4267 4277
4268 /* Rx data */ 4278 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d",
4269 data->rx.tpa_en = test_bit(BNX2X_Q_FLG_TPA, &params->flags) * 4279 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4280}
4281
4282static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4283 struct bnx2x_txq_setup_params *params,
4284 struct client_init_tx_data *tx_data,
4285 unsigned long *flags)
4286{
4287 tx_data->enforce_security_flg =
4288 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4289 tx_data->default_vlan =
4290 cpu_to_le16(params->default_vlan);
4291 tx_data->default_vlan_flg =
4292 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4293 tx_data->tx_switching_flg =
4294 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4295 tx_data->anti_spoofing_flg =
4296 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4297 tx_data->tx_status_block_id = params->fw_sb_id;
4298 tx_data->tx_sb_index_number = params->sb_cq_index;
4299 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4300
4301 tx_data->tx_bd_page_base.lo =
4302 cpu_to_le32(U64_LO(params->dscr_map));
4303 tx_data->tx_bd_page_base.hi =
4304 cpu_to_le32(U64_HI(params->dscr_map));
4305
4306 /* Don't configure any Tx switching mode during queue SETUP */
4307 tx_data->state = 0;
4308}
4309
4310static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4311 struct rxq_pause_params *params,
4312 struct client_init_rx_data *rx_data)
4313{
4314 /* flow control data */
4315 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4316 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4317 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4318 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4319 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4320 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4321 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4322}
4323
4324static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4325 struct bnx2x_rxq_setup_params *params,
4326 struct client_init_rx_data *rx_data,
4327 unsigned long *flags)
4328{
4329 /* Rx data */
4330 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4270 CLIENT_INIT_RX_DATA_TPA_EN_IPV4; 4331 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4271 data->rx.vmqueue_mode_en_flg = 0; 4332 rx_data->vmqueue_mode_en_flg = 0;
4272 4333
4273 data->rx.cache_line_alignment_log_size = 4334 rx_data->cache_line_alignment_log_size =
4274 params->rxq_params.cache_line_log; 4335 params->cache_line_log;
4275 data->rx.enable_dynamic_hc = 4336 rx_data->enable_dynamic_hc =
4276 test_bit(BNX2X_Q_FLG_DHC, &params->flags); 4337 test_bit(BNX2X_Q_FLG_DHC, flags);
4277 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt; 4338 rx_data->max_sges_for_packet = params->max_sges_pkt;
4278 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id; 4339 rx_data->client_qzone_id = params->cl_qzone_id;
4279 data->rx.max_agg_size = cpu_to_le16(params->rxq_params.tpa_agg_sz); 4340 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4280 4341
4281 /* Always start in DROP_ALL mode */ 4342 /* Always start in DROP_ALL mode */
4282 data->rx.state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | 4343 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4283 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); 4344 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4284 4345
4285 /* We don't set drop flags */ 4346 /* We don't set drop flags */
4286 data->rx.drop_ip_cs_err_flg = 0; 4347 rx_data->drop_ip_cs_err_flg = 0;
4287 data->rx.drop_tcp_cs_err_flg = 0; 4348 rx_data->drop_tcp_cs_err_flg = 0;
4288 data->rx.drop_ttl0_flg = 0; 4349 rx_data->drop_ttl0_flg = 0;
4289 data->rx.drop_udp_cs_err_flg = 0; 4350 rx_data->drop_udp_cs_err_flg = 0;
4290 data->rx.inner_vlan_removal_enable_flg = 4351 rx_data->inner_vlan_removal_enable_flg =
4291 test_bit(BNX2X_Q_FLG_VLAN, &params->flags); 4352 test_bit(BNX2X_Q_FLG_VLAN, flags);
4292 data->rx.outer_vlan_removal_enable_flg = 4353 rx_data->outer_vlan_removal_enable_flg =
4293 test_bit(BNX2X_Q_FLG_OV, &params->flags); 4354 test_bit(BNX2X_Q_FLG_OV, flags);
4294 data->rx.status_block_id = params->rxq_params.fw_sb_id; 4355 rx_data->status_block_id = params->fw_sb_id;
4295 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index; 4356 rx_data->rx_sb_index_number = params->sb_cq_index;
4296 data->rx.max_tpa_queues = params->rxq_params.max_tpa_queues; 4357 rx_data->max_tpa_queues = params->max_tpa_queues;
4297 data->rx.max_bytes_on_bd = cpu_to_le16(params->rxq_params.buf_sz); 4358 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4298 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz); 4359 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4299 data->rx.bd_page_base.lo = 4360 rx_data->bd_page_base.lo =
4300 cpu_to_le32(U64_LO(params->rxq_params.dscr_map)); 4361 cpu_to_le32(U64_LO(params->dscr_map));
4301 data->rx.bd_page_base.hi = 4362 rx_data->bd_page_base.hi =
4302 cpu_to_le32(U64_HI(params->rxq_params.dscr_map)); 4363 cpu_to_le32(U64_HI(params->dscr_map));
4303 data->rx.sge_page_base.lo = 4364 rx_data->sge_page_base.lo =
4304 cpu_to_le32(U64_LO(params->rxq_params.sge_map)); 4365 cpu_to_le32(U64_LO(params->sge_map));
4305 data->rx.sge_page_base.hi = 4366 rx_data->sge_page_base.hi =
4306 cpu_to_le32(U64_HI(params->rxq_params.sge_map)); 4367 cpu_to_le32(U64_HI(params->sge_map));
4307 data->rx.cqe_page_base.lo = 4368 rx_data->cqe_page_base.lo =
4308 cpu_to_le32(U64_LO(params->rxq_params.rcq_map)); 4369 cpu_to_le32(U64_LO(params->rcq_map));
4309 data->rx.cqe_page_base.hi = 4370 rx_data->cqe_page_base.hi =
4310 cpu_to_le32(U64_HI(params->rxq_params.rcq_map)); 4371 cpu_to_le32(U64_HI(params->rcq_map));
4311 data->rx.is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, 4372 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4312 &params->flags); 4373
4313 4374 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4314 if (test_bit(BNX2X_Q_FLG_MCAST, &params->flags)) { 4375 rx_data->approx_mcast_engine_id = o->func_id;
4315 data->rx.approx_mcast_engine_id = o->func_id; 4376 rx_data->is_approx_mcast = 1;
4316 data->rx.is_approx_mcast = 1;
4317 } 4377 }
4318 4378
4319 data->rx.rss_engine_id = params->rxq_params.rss_engine_id; 4379 rx_data->rss_engine_id = params->rss_engine_id;
4320
4321 /* flow control data */
4322 data->rx.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
4323 data->rx.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
4324 data->rx.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
4325 data->rx.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
4326 data->rx.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
4327 data->rx.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
4328 data->rx.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
4329 4380
4330 /* silent vlan removal */ 4381 /* silent vlan removal */
4331 data->rx.silent_vlan_removal_flg = 4382 rx_data->silent_vlan_removal_flg =
4332 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &params->flags); 4383 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4333 data->rx.silent_vlan_value = 4384 rx_data->silent_vlan_value =
4334 cpu_to_le16(params->rxq_params.silent_removal_value); 4385 cpu_to_le16(params->silent_removal_value);
4335 data->rx.silent_vlan_mask = 4386 rx_data->silent_vlan_mask =
4336 cpu_to_le16(params->rxq_params.silent_removal_mask); 4387 cpu_to_le16(params->silent_removal_mask);
4337
4338 /* Tx data */
4339 data->tx.enforce_security_flg =
4340 test_bit(BNX2X_Q_FLG_TX_SEC, &params->flags);
4341 data->tx.default_vlan =
4342 cpu_to_le16(params->txq_params.default_vlan);
4343 data->tx.default_vlan_flg =
4344 test_bit(BNX2X_Q_FLG_DEF_VLAN, &params->flags);
4345 data->tx.tx_switching_flg =
4346 test_bit(BNX2X_Q_FLG_TX_SWITCH, &params->flags);
4347 data->tx.anti_spoofing_flg =
4348 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, &params->flags);
4349 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
4350 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
4351 data->tx.tss_leading_client_id = params->txq_params.tss_leading_cl_id;
4352
4353 data->tx.tx_bd_page_base.lo =
4354 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
4355 data->tx.tx_bd_page_base.hi =
4356 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
4357 4388
4358 /* Don't configure any Tx switching mode during queue SETUP */
4359 data->tx.state = 0;
4360} 4389}
4361 4390
4391/* initialize the general, tx and rx parts of a queue object */
4392static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4393 struct bnx2x_queue_state_params *cmd_params,
4394 struct client_init_ramrod_data *data)
4395{
4396 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4397 &cmd_params->params.setup.gen_params,
4398 &data->general,
4399 &cmd_params->params.setup.flags);
4400
4401 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4402 &cmd_params->params.setup.txq_params,
4403 &data->tx,
4404 &cmd_params->params.setup.flags);
4405
4406 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4407 &cmd_params->params.setup.rxq_params,
4408 &data->rx,
4409 &cmd_params->params.setup.flags);
4410
4411 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4412 &cmd_params->params.setup.pause_params,
4413 &data->rx);
4414}
4415
4416/* initialize the general and tx parts of a tx-only queue object */
4417static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4418 struct bnx2x_queue_state_params *cmd_params,
4419 struct tx_queue_init_ramrod_data *data)
4420{
4421 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4422 &cmd_params->params.tx_only.gen_params,
4423 &data->general,
4424 &cmd_params->params.tx_only.flags);
4425
4426 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4427 &cmd_params->params.tx_only.txq_params,
4428 &data->tx,
4429 &cmd_params->params.tx_only.flags);
4430
4431 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",cmd_params->q_obj->cids[0],
4432 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
4433}
4362 4434
4363/** 4435/**
4364 * bnx2x_q_init - init HW/FW queue 4436 * bnx2x_q_init - init HW/FW queue
@@ -4377,6 +4449,7 @@ static inline int bnx2x_q_init(struct bnx2x *bp,
4377 struct bnx2x_queue_sp_obj *o = params->q_obj; 4449 struct bnx2x_queue_sp_obj *o = params->q_obj;
4378 struct bnx2x_queue_init_params *init = &params->params.init; 4450 struct bnx2x_queue_init_params *init = &params->params.init;
4379 u16 hc_usec; 4451 u16 hc_usec;
4452 u8 cos;
4380 4453
4381 /* Tx HC configuration */ 4454 /* Tx HC configuration */
4382 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) && 4455 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
@@ -4401,7 +4474,12 @@ static inline int bnx2x_q_init(struct bnx2x *bp,
4401 } 4474 }
4402 4475
4403 /* Set CDU context validation values */ 4476 /* Set CDU context validation values */
4404 bnx2x_set_ctx_validation(bp, init->cxt, o->cid); 4477 for (cos = 0; cos < o->max_cos; cos++) {
4478 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d",
4479 o->cids[cos], cos);
4480 DP(BNX2X_MSG_SP, "context pointer %p", init->cxts[cos]);
4481 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4482 }
4405 4483
4406 /* As no ramrod is sent, complete the command immediately */ 4484 /* As no ramrod is sent, complete the command immediately */
4407 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT); 4485 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
@@ -4429,7 +4507,8 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4429 4507
4430 mb(); 4508 mb();
4431 4509
4432 return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping), 4510 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4511 U64_HI(data_mapping),
4433 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4512 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4434} 4513}
4435 4514
@@ -4449,9 +4528,57 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4449 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); 4528 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4450 bnx2x_q_fill_setup_data_e2(bp, params, rdata); 4529 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4451 4530
4452 mb();
4453 4531
4454 return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping), 4532 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4533 U64_HI(data_mapping),
4534 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4535}
4536
4537static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4538 struct bnx2x_queue_state_params *params)
4539{
4540 struct bnx2x_queue_sp_obj *o = params->q_obj;
4541 struct tx_queue_init_ramrod_data *rdata =
4542 (struct tx_queue_init_ramrod_data *)o->rdata;
4543 dma_addr_t data_mapping = o->rdata_mapping;
4544 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4545 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4546 &params->params.tx_only;
4547 u8 cid_index = tx_only_params->cid_index;
4548
4549
4550 if (cid_index >= o->max_cos) {
4551 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4552 o->cl_id, cid_index);
4553 return -EINVAL;
4554 }
4555
4556 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d",
4557 tx_only_params->gen_params.cos,
4558 tx_only_params->gen_params.spcl_id);
4559
4560 /* Clear the ramrod data */
4561 memset(rdata, 0, sizeof(*rdata));
4562
4563 /* Fill the ramrod data */
4564 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4565
4566 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d,"
4567 "sp-client id %d, cos %d",
4568 o->cids[cid_index],
4569 rdata->general.client_id,
4570 rdata->general.sp_client_id, rdata->general.cos);
4571
4572 /*
4573 * No need for an explicit memory barrier here as long we would
4574 * need to ensure the ordering of writing to the SPQ element
4575 * and updating of the SPQ producer which involves a memory
4576 * read and we will have to put a full memory barrier there
4577 * (inside bnx2x_sp_post()).
4578 */
4579
4580 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4581 U64_HI(data_mapping),
4455 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4582 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4456} 4583}
4457 4584
@@ -4521,17 +4648,27 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
4521 struct client_update_ramrod_data *rdata = 4648 struct client_update_ramrod_data *rdata =
4522 (struct client_update_ramrod_data *)o->rdata; 4649 (struct client_update_ramrod_data *)o->rdata;
4523 dma_addr_t data_mapping = o->rdata_mapping; 4650 dma_addr_t data_mapping = o->rdata_mapping;
4651 struct bnx2x_queue_update_params *update_params =
4652 &params->params.update;
4653 u8 cid_index = update_params->cid_index;
4654
4655 if (cid_index >= o->max_cos) {
4656 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4657 o->cl_id, cid_index);
4658 return -EINVAL;
4659 }
4660
4524 4661
4525 /* Clear the ramrod data */ 4662 /* Clear the ramrod data */
4526 memset(rdata, 0, sizeof(*rdata)); 4663 memset(rdata, 0, sizeof(*rdata));
4527 4664
4528 /* Fill the ramrod data */ 4665 /* Fill the ramrod data */
4529 bnx2x_q_fill_update_data(bp, o, &params->params.update, rdata); 4666 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4530 4667
4531 mb(); 4668 mb();
4532 4669
4533 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, o->cid, 4670 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4534 U64_HI(data_mapping), 4671 o->cids[cid_index], U64_HI(data_mapping),
4535 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4672 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4536} 4673}
4537 4674
@@ -4588,7 +4725,8 @@ static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4588{ 4725{
4589 struct bnx2x_queue_sp_obj *o = params->q_obj; 4726 struct bnx2x_queue_sp_obj *o = params->q_obj;
4590 4727
4591 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, o->cid, 0, o->cl_id, 4728 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4729 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4592 ETH_CONNECTION_TYPE); 4730 ETH_CONNECTION_TYPE);
4593} 4731}
4594 4732
@@ -4596,18 +4734,32 @@ static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4596 struct bnx2x_queue_state_params *params) 4734 struct bnx2x_queue_state_params *params)
4597{ 4735{
4598 struct bnx2x_queue_sp_obj *o = params->q_obj; 4736 struct bnx2x_queue_sp_obj *o = params->q_obj;
4737 u8 cid_idx = params->params.cfc_del.cid_index;
4599 4738
4600 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, o->cid, 0, 0, 4739 if (cid_idx >= o->max_cos) {
4601 NONE_CONNECTION_TYPE); 4740 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4741 o->cl_id, cid_idx);
4742 return -EINVAL;
4743 }
4744
4745 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4746 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4602} 4747}
4603 4748
4604static inline int bnx2x_q_send_terminate(struct bnx2x *bp, 4749static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4605 struct bnx2x_queue_state_params *params) 4750 struct bnx2x_queue_state_params *params)
4606{ 4751{
4607 struct bnx2x_queue_sp_obj *o = params->q_obj; 4752 struct bnx2x_queue_sp_obj *o = params->q_obj;
4753 u8 cid_index = params->params.terminate.cid_index;
4608 4754
4609 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, o->cid, 0, 0, 4755 if (cid_index >= o->max_cos) {
4610 ETH_CONNECTION_TYPE); 4756 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4757 o->cl_id, cid_index);
4758 return -EINVAL;
4759 }
4760
4761 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4762 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4611} 4763}
4612 4764
4613static inline int bnx2x_q_send_empty(struct bnx2x *bp, 4765static inline int bnx2x_q_send_empty(struct bnx2x *bp,
@@ -4615,7 +4767,8 @@ static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4615{ 4767{
4616 struct bnx2x_queue_sp_obj *o = params->q_obj; 4768 struct bnx2x_queue_sp_obj *o = params->q_obj;
4617 4769
4618 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, o->cid, 0, 0, 4770 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4771 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4619 ETH_CONNECTION_TYPE); 4772 ETH_CONNECTION_TYPE);
4620} 4773}
4621 4774
@@ -4625,6 +4778,8 @@ static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4625 switch (params->cmd) { 4778 switch (params->cmd) {
4626 case BNX2X_Q_CMD_INIT: 4779 case BNX2X_Q_CMD_INIT:
4627 return bnx2x_q_init(bp, params); 4780 return bnx2x_q_init(bp, params);
4781 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4782 return bnx2x_q_send_setup_tx_only(bp, params);
4628 case BNX2X_Q_CMD_DEACTIVATE: 4783 case BNX2X_Q_CMD_DEACTIVATE:
4629 return bnx2x_q_send_deactivate(bp, params); 4784 return bnx2x_q_send_deactivate(bp, params);
4630 case BNX2X_Q_CMD_ACTIVATE: 4785 case BNX2X_Q_CMD_ACTIVATE:
@@ -4654,6 +4809,7 @@ static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4654 case BNX2X_Q_CMD_SETUP: 4809 case BNX2X_Q_CMD_SETUP:
4655 return bnx2x_q_send_setup_e1x(bp, params); 4810 return bnx2x_q_send_setup_e1x(bp, params);
4656 case BNX2X_Q_CMD_INIT: 4811 case BNX2X_Q_CMD_INIT:
4812 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4657 case BNX2X_Q_CMD_DEACTIVATE: 4813 case BNX2X_Q_CMD_DEACTIVATE:
4658 case BNX2X_Q_CMD_ACTIVATE: 4814 case BNX2X_Q_CMD_ACTIVATE:
4659 case BNX2X_Q_CMD_UPDATE: 4815 case BNX2X_Q_CMD_UPDATE:
@@ -4676,6 +4832,7 @@ static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4676 case BNX2X_Q_CMD_SETUP: 4832 case BNX2X_Q_CMD_SETUP:
4677 return bnx2x_q_send_setup_e2(bp, params); 4833 return bnx2x_q_send_setup_e2(bp, params);
4678 case BNX2X_Q_CMD_INIT: 4834 case BNX2X_Q_CMD_INIT:
4835 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4679 case BNX2X_Q_CMD_DEACTIVATE: 4836 case BNX2X_Q_CMD_DEACTIVATE:
4680 case BNX2X_Q_CMD_ACTIVATE: 4837 case BNX2X_Q_CMD_ACTIVATE:
4681 case BNX2X_Q_CMD_UPDATE: 4838 case BNX2X_Q_CMD_UPDATE:
@@ -4713,6 +4870,9 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4713{ 4870{
4714 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX; 4871 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4715 enum bnx2x_queue_cmd cmd = params->cmd; 4872 enum bnx2x_queue_cmd cmd = params->cmd;
4873 struct bnx2x_queue_update_params *update_params =
4874 &params->params.update;
4875 u8 next_tx_only = o->num_tx_only;
4716 4876
4717 switch (state) { 4877 switch (state) {
4718 case BNX2X_Q_STATE_RESET: 4878 case BNX2X_Q_STATE_RESET:
@@ -4738,13 +4898,42 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4738 (cmd == BNX2X_Q_CMD_UPDATE_TPA)) 4898 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4739 next_state = BNX2X_Q_STATE_ACTIVE; 4899 next_state = BNX2X_Q_STATE_ACTIVE;
4740 4900
4901 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4902 next_state = BNX2X_Q_STATE_MULTI_COS;
4903 next_tx_only = 1;
4904 }
4905
4741 else if (cmd == BNX2X_Q_CMD_HALT) 4906 else if (cmd == BNX2X_Q_CMD_HALT)
4742 next_state = BNX2X_Q_STATE_STOPPED; 4907 next_state = BNX2X_Q_STATE_STOPPED;
4743 4908
4744 else if (cmd == BNX2X_Q_CMD_UPDATE) { 4909 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4745 struct bnx2x_queue_update_params *update_params = 4910 /* If "active" state change is requested, update the
4746 &params->params.update; 4911 * state accordingly.
4912 */
4913 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4914 &update_params->update_flags) &&
4915 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4916 &update_params->update_flags))
4917 next_state = BNX2X_Q_STATE_INACTIVE;
4918 else
4919 next_state = BNX2X_Q_STATE_ACTIVE;
4920 }
4747 4921
4922 break;
4923 case BNX2X_Q_STATE_MULTI_COS:
4924 if (cmd == BNX2X_Q_CMD_TERMINATE)
4925 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
4926
4927 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4928 next_state = BNX2X_Q_STATE_MULTI_COS;
4929 next_tx_only = o->num_tx_only + 1;
4930 }
4931
4932 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4933 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4934 next_state = BNX2X_Q_STATE_MULTI_COS;
4935
4936 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4748 /* If "active" state change is requested, update the 4937 /* If "active" state change is requested, update the
4749 * state accordingly. 4938 * state accordingly.
4750 */ 4939 */
@@ -4754,7 +4943,17 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4754 &update_params->update_flags)) 4943 &update_params->update_flags))
4755 next_state = BNX2X_Q_STATE_INACTIVE; 4944 next_state = BNX2X_Q_STATE_INACTIVE;
4756 else 4945 else
4946 next_state = BNX2X_Q_STATE_MULTI_COS;
4947 }
4948
4949 break;
4950 case BNX2X_Q_STATE_MCOS_TERMINATED:
4951 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
4952 next_tx_only = o->num_tx_only - 1;
4953 if (next_tx_only == 0)
4757 next_state = BNX2X_Q_STATE_ACTIVE; 4954 next_state = BNX2X_Q_STATE_ACTIVE;
4955 else
4956 next_state = BNX2X_Q_STATE_MULTI_COS;
4758 } 4957 }
4759 4958
4760 break; 4959 break;
@@ -4770,18 +4969,18 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4770 next_state = BNX2X_Q_STATE_STOPPED; 4969 next_state = BNX2X_Q_STATE_STOPPED;
4771 4970
4772 else if (cmd == BNX2X_Q_CMD_UPDATE) { 4971 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4773 struct bnx2x_queue_update_params *update_params =
4774 &params->params.update;
4775
4776 /* If "active" state change is requested, update the 4972 /* If "active" state change is requested, update the
4777 * state accordingly. 4973 * state accordingly.
4778 */ 4974 */
4779 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, 4975 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4780 &update_params->update_flags) && 4976 &update_params->update_flags) &&
4781 test_bit(BNX2X_Q_UPDATE_ACTIVATE, 4977 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4782 &update_params->update_flags)) 4978 &update_params->update_flags)){
4783 next_state = BNX2X_Q_STATE_ACTIVE; 4979 if (o->num_tx_only == 0)
4784 else 4980 next_state = BNX2X_Q_STATE_ACTIVE;
4981 else /* tx only queues exist for this queue */
4982 next_state = BNX2X_Q_STATE_MULTI_COS;
4983 } else
4785 next_state = BNX2X_Q_STATE_INACTIVE; 4984 next_state = BNX2X_Q_STATE_INACTIVE;
4786 } 4985 }
4787 4986
@@ -4805,6 +5004,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4805 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n", 5004 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
4806 state, cmd, next_state); 5005 state, cmd, next_state);
4807 o->next_state = next_state; 5006 o->next_state = next_state;
5007 o->next_tx_only = next_tx_only;
4808 return 0; 5008 return 0;
4809 } 5009 }
4810 5010
@@ -4815,12 +5015,17 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4815 5015
4816void bnx2x_init_queue_obj(struct bnx2x *bp, 5016void bnx2x_init_queue_obj(struct bnx2x *bp,
4817 struct bnx2x_queue_sp_obj *obj, 5017 struct bnx2x_queue_sp_obj *obj,
4818 u8 cl_id, u32 cid, u8 func_id, void *rdata, 5018 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5019 void *rdata,
4819 dma_addr_t rdata_mapping, unsigned long type) 5020 dma_addr_t rdata_mapping, unsigned long type)
4820{ 5021{
4821 memset(obj, 0, sizeof(*obj)); 5022 memset(obj, 0, sizeof(*obj));
4822 5023
4823 obj->cid = cid; 5024 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5025 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5026
5027 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5028 obj->max_cos = cid_cnt;
4824 obj->cl_id = cl_id; 5029 obj->cl_id = cl_id;
4825 obj->func_id = func_id; 5030 obj->func_id = func_id;
4826 obj->rdata = rdata; 5031 obj->rdata = rdata;
@@ -4840,6 +5045,13 @@ void bnx2x_init_queue_obj(struct bnx2x *bp,
4840 obj->set_pending = bnx2x_queue_set_pending; 5045 obj->set_pending = bnx2x_queue_set_pending;
4841} 5046}
4842 5047
5048void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
5049 struct bnx2x_queue_sp_obj *obj,
5050 u32 cid, u8 index)
5051{
5052 obj->cids[index] = cid;
5053}
5054
4843/********************** Function state object *********************************/ 5055/********************** Function state object *********************************/
4844 5056
4845static int bnx2x_func_wait_comp(struct bnx2x *bp, 5057static int bnx2x_func_wait_comp(struct bnx2x *bp,
diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h
index 86eaa80721ea..83f3b0b44211 100644
--- a/drivers/net/bnx2x/bnx2x_sp.h
+++ b/drivers/net/bnx2x/bnx2x_sp.h
@@ -721,6 +721,8 @@ enum bnx2x_q_state {
721 BNX2X_Q_STATE_RESET, 721 BNX2X_Q_STATE_RESET,
722 BNX2X_Q_STATE_INITIALIZED, 722 BNX2X_Q_STATE_INITIALIZED,
723 BNX2X_Q_STATE_ACTIVE, 723 BNX2X_Q_STATE_ACTIVE,
724 BNX2X_Q_STATE_MULTI_COS,
725 BNX2X_Q_STATE_MCOS_TERMINATED,
724 BNX2X_Q_STATE_INACTIVE, 726 BNX2X_Q_STATE_INACTIVE,
725 BNX2X_Q_STATE_STOPPED, 727 BNX2X_Q_STATE_STOPPED,
726 BNX2X_Q_STATE_TERMINATED, 728 BNX2X_Q_STATE_TERMINATED,
@@ -732,6 +734,7 @@ enum bnx2x_q_state {
732enum bnx2x_queue_cmd { 734enum bnx2x_queue_cmd {
733 BNX2X_Q_CMD_INIT, 735 BNX2X_Q_CMD_INIT,
734 BNX2X_Q_CMD_SETUP, 736 BNX2X_Q_CMD_SETUP,
737 BNX2X_Q_CMD_SETUP_TX_ONLY,
735 BNX2X_Q_CMD_DEACTIVATE, 738 BNX2X_Q_CMD_DEACTIVATE,
736 BNX2X_Q_CMD_ACTIVATE, 739 BNX2X_Q_CMD_ACTIVATE,
737 BNX2X_Q_CMD_UPDATE, 740 BNX2X_Q_CMD_UPDATE,
@@ -774,6 +777,13 @@ enum bnx2x_q_type {
774 BNX2X_Q_TYPE_HAS_TX, 777 BNX2X_Q_TYPE_HAS_TX,
775}; 778};
776 779
780#define BNX2X_PRIMARY_CID_INDEX 0
781#define BNX2X_MULTI_TX_COS_E1X 1
782#define BNX2X_MULTI_TX_COS_E2_E3A0 2
783#define BNX2X_MULTI_TX_COS_E3B0 3
784#define BNX2X_MULTI_TX_COS BNX2X_MULTI_TX_COS_E3B0
785
786
777struct bnx2x_queue_init_params { 787struct bnx2x_queue_init_params {
778 struct { 788 struct {
779 unsigned long flags; 789 unsigned long flags;
@@ -790,7 +800,20 @@ struct bnx2x_queue_init_params {
790 } rx; 800 } rx;
791 801
792 /* CID context in the host memory */ 802 /* CID context in the host memory */
793 struct eth_context *cxt; 803 struct eth_context *cxts[BNX2X_MULTI_TX_COS];
804
805 /* maximum number of cos supported by hardware */
806 u8 max_cos;
807};
808
809struct bnx2x_queue_terminate_params {
810 /* index within the tx_only cids of this queue object */
811 u8 cid_index;
812};
813
814struct bnx2x_queue_cfc_del_params {
815 /* index within the tx_only cids of this queue object */
816 u8 cid_index;
794}; 817};
795 818
796struct bnx2x_queue_update_params { 819struct bnx2x_queue_update_params {
@@ -798,6 +821,8 @@ struct bnx2x_queue_update_params {
798 u16 def_vlan; 821 u16 def_vlan;
799 u16 silent_removal_value; 822 u16 silent_removal_value;
800 u16 silent_removal_mask; 823 u16 silent_removal_mask;
824/* index within the tx_only cids of this queue object */
825 u8 cid_index;
801}; 826};
802 827
803struct rxq_pause_params { 828struct rxq_pause_params {
@@ -817,6 +842,7 @@ struct bnx2x_general_setup_params {
817 842
818 u8 spcl_id; 843 u8 spcl_id;
819 u16 mtu; 844 u16 mtu;
845 u8 cos;
820}; 846};
821 847
822struct bnx2x_rxq_setup_params { 848struct bnx2x_rxq_setup_params {
@@ -863,13 +889,20 @@ struct bnx2x_txq_setup_params {
863}; 889};
864 890
865struct bnx2x_queue_setup_params { 891struct bnx2x_queue_setup_params {
866 struct rxq_pause_params pause;
867 struct bnx2x_general_setup_params gen_params; 892 struct bnx2x_general_setup_params gen_params;
868 struct bnx2x_rxq_setup_params rxq_params;
869 struct bnx2x_txq_setup_params txq_params; 893 struct bnx2x_txq_setup_params txq_params;
894 struct bnx2x_rxq_setup_params rxq_params;
895 struct rxq_pause_params pause_params;
870 unsigned long flags; 896 unsigned long flags;
871}; 897};
872 898
899struct bnx2x_queue_setup_tx_only_params {
900 struct bnx2x_general_setup_params gen_params;
901 struct bnx2x_txq_setup_params txq_params;
902 unsigned long flags;
903 /* index within the tx_only cids of this queue object */
904 u8 cid_index;
905};
873 906
874struct bnx2x_queue_state_params { 907struct bnx2x_queue_state_params {
875 struct bnx2x_queue_sp_obj *q_obj; 908 struct bnx2x_queue_sp_obj *q_obj;
@@ -878,21 +911,36 @@ struct bnx2x_queue_state_params {
878 enum bnx2x_queue_cmd cmd; 911 enum bnx2x_queue_cmd cmd;
879 912
880 /* may have RAMROD_COMP_WAIT set only */ 913 /* may have RAMROD_COMP_WAIT set only */
881 unsigned long ramrod_flags; 914 unsigned long ramrod_flags;
882 915
883 /* Params according to the current command */ 916 /* Params according to the current command */
884 union { 917 union {
885 struct bnx2x_queue_update_params update; 918 struct bnx2x_queue_update_params update;
886 struct bnx2x_queue_setup_params setup; 919 struct bnx2x_queue_setup_params setup;
887 struct bnx2x_queue_init_params init; 920 struct bnx2x_queue_init_params init;
921 struct bnx2x_queue_setup_tx_only_params tx_only;
922 struct bnx2x_queue_terminate_params terminate;
923 struct bnx2x_queue_cfc_del_params cfc_del;
888 } params; 924 } params;
889}; 925};
890 926
891struct bnx2x_queue_sp_obj { 927struct bnx2x_queue_sp_obj {
892 u32 cid; 928 u32 cids[BNX2X_MULTI_TX_COS];
893 u8 cl_id; 929 u8 cl_id;
894 u8 func_id; 930 u8 func_id;
895 931
932 /*
933 * number of traffic classes supported by queue.
934 * The primary connection of the queue suppotrs the first traffic
935 * class. Any further traffic class is suppoted by a tx-only
936 * connection.
937 *
938 * Therefore max_cos is also a number of valid entries in the cids
939 * array.
940 */
941 u8 max_cos;
942 u8 num_tx_only, next_tx_only;
943
896 enum bnx2x_q_state state, next_state; 944 enum bnx2x_q_state state, next_state;
897 945
898 /* bits from enum bnx2x_q_type */ 946 /* bits from enum bnx2x_q_type */
@@ -1106,9 +1154,9 @@ int bnx2x_func_state_change(struct bnx2x *bp,
1106 1154
1107/******************* Queue State **************/ 1155/******************* Queue State **************/
1108void bnx2x_init_queue_obj(struct bnx2x *bp, 1156void bnx2x_init_queue_obj(struct bnx2x *bp,
1109 struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 cid, 1157 struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 *cids,
1110 u8 func_id, void *rdata, dma_addr_t rdata_mapping, 1158 u8 cid_cnt, u8 func_id, void *rdata,
1111 unsigned long type); 1159 dma_addr_t rdata_mapping, unsigned long type);
1112 1160
1113int bnx2x_queue_state_change(struct bnx2x *bp, 1161int bnx2x_queue_state_change(struct bnx2x *bp,
1114 struct bnx2x_queue_state_params *params); 1162 struct bnx2x_queue_state_params *params);
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 54c07f557ad4..771f6803b238 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1185,7 +1185,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1185 1185
1186 if (netif_msg_timer(bp)) { 1186 if (netif_msg_timer(bp)) {
1187 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1187 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1188 int i; 1188 int i, cos;
1189 1189
1190 netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n", 1190 netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
1191 estats->brb_drop_lo, estats->brb_truncate_lo); 1191 estats->brb_drop_lo, estats->brb_truncate_lo);
@@ -1206,20 +1206,32 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1206 1206
1207 for_each_eth_queue(bp, i) { 1207 for_each_eth_queue(bp, i) {
1208 struct bnx2x_fastpath *fp = &bp->fp[i]; 1208 struct bnx2x_fastpath *fp = &bp->fp[i];
1209 struct bnx2x_fp_txdata *txdata;
1209 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; 1210 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1210 struct netdev_queue *txq = 1211 struct netdev_queue *txq;
1211 netdev_get_tx_queue(bp->dev, i); 1212
1212 1213 printk(KERN_DEBUG "%s: tx pkt(%lu) (Xoff events %u)",
1213 printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)" 1214 fp->name, bnx2x_hilo(
1214 " tx pkt(%lu) tx calls (%lu)" 1215 &qstats->total_unicast_packets_transmitted_hi),
1215 " %s (Xoff events %u)\n", 1216 qstats->driver_xoff);
1216 fp->name, bnx2x_tx_avail(fp), 1217
1217 le16_to_cpu(*fp->tx_cons_sb), 1218 for_each_cos_in_tx_queue(fp, cos) {
1218 bnx2x_hilo(&qstats-> 1219 txdata = &fp->txdata[cos];
1219 total_unicast_packets_transmitted_hi), 1220 txq = netdev_get_tx_queue(bp->dev,
1220 fp->tx_pkt, 1221 FP_COS_TO_TXQ(fp, cos));
1221 (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"), 1222
1222 qstats->driver_xoff); 1223 printk(KERN_DEBUG "%d: tx avail(%4u)"
1224 " *tx_cons_sb(%u)"
1225 " tx calls (%lu)"
1226 " %s\n",
1227 cos,
1228 bnx2x_tx_avail(bp, txdata),
1229 le16_to_cpu(*txdata->tx_cons_sb),
1230 txdata->tx_pkt,
1231 (netif_tx_queue_stopped(txq) ?
1232 "Xoff" : "Xon")
1233 );
1234 }
1223 } 1235 }
1224 } 1236 }
1225 1237