Diffstat (limited to 'drivers/net/bnx2x')
-rw-r--r--  drivers/net/bnx2x/bnx2x.h         | 142
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c     |  62
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h     |   2
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.c     |   3
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c |  48
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c    |  46
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c    | 187
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h     |  45
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c   |   7
9 files changed, 395 insertions, 147 deletions
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index c423504a755f..9a7eb3b36cf3 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -239,13 +239,19 @@ void bnx2x_int_disable(struct bnx2x *bp); | |||
239 | * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X | 239 | * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X |
240 | * | 240 | * |
241 | */ | 241 | */ |
242 | /* iSCSI L2 */ | 242 | enum { |
243 | #define BNX2X_ISCSI_ETH_CL_ID_IDX 1 | 243 | BNX2X_ISCSI_ETH_CL_ID_IDX, |
244 | #define BNX2X_ISCSI_ETH_CID 49 | 244 | BNX2X_FCOE_ETH_CL_ID_IDX, |
245 | BNX2X_MAX_CNIC_ETH_CL_ID_IDX, | ||
246 | }; | ||
245 | 247 | ||
246 | /* FCoE L2 */ | 248 | #define BNX2X_CNIC_START_ETH_CID 48 |
247 | #define BNX2X_FCOE_ETH_CL_ID_IDX 2 | 249 | enum { |
248 | #define BNX2X_FCOE_ETH_CID 50 | 250 | /* iSCSI L2 */ |
251 | BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, | ||
252 | /* FCoE L2 */ | ||
253 | BNX2X_FCOE_ETH_CID, | ||
254 | }; | ||
249 | 255 | ||
250 | /** Additional rings budgeting */ | 256 | /** Additional rings budgeting */ |
251 | #ifdef BCM_CNIC | 257 | #ifdef BCM_CNIC |
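For reference, a standalone sketch (not driver code) of the numbering the new enums above establish: the CNIC client-id indices move from 1/2 to 0/1, the L2 CIDs move from 49/50 to 48/49, and BNX2X_MAX_CNIC_ETH_CL_ID_IDX now gives the number of CNIC L2 clients per function.

#include <stdio.h>

enum { ISCSI_ETH_CL_ID_IDX, FCOE_ETH_CL_ID_IDX, MAX_CNIC_ETH_CL_ID_IDX };

#define CNIC_START_ETH_CID 48
enum { ISCSI_ETH_CID = CNIC_START_ETH_CID, FCOE_ETH_CID };

int main(void)
{
	printf("iSCSI: cl_id idx %d, cid %d\n", ISCSI_ETH_CL_ID_IDX, ISCSI_ETH_CID);
	printf("FCoE:  cl_id idx %d, cid %d\n", FCOE_ETH_CL_ID_IDX, FCOE_ETH_CID);
	printf("CNIC L2 clients per function: %d\n", MAX_CNIC_ETH_CL_ID_IDX);
	return 0;
}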
@@ -315,6 +321,14 @@ union db_prod { | |||
315 | u32 raw; | 321 | u32 raw; |
316 | }; | 322 | }; |
317 | 323 | ||
324 | /* dropless fc FW/HW related params */ | ||
325 | #define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512) | ||
326 | #define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \ | ||
327 | ETH_MAX_AGGREGATION_QUEUES_E1 :\ | ||
328 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2) | ||
329 | #define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp)) | ||
330 | #define FW_PREFETCH_CNT 16 | ||
331 | #define DROPLESS_FC_HEADROOM 100 | ||
318 | 332 | ||
319 | /* MC hsi */ | 333 | /* MC hsi */ |
320 | #define BCM_PAGE_SHIFT 12 | 334 | #define BCM_PAGE_SHIFT 12 |
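A rough arithmetic sketch of what the new dropless-FC parameters come to on a non-E3 chip. BRB_SIZE (512) is taken from the macro above; the ETH_MAX_AGGREGATION_QUEUES_E1H_E2 and MAX_SPQ_PENDING values (64 and 8) are assumptions for illustration only.

#include <stdio.h>

#define BRB_SIZE_E2		512	/* CHIP_IS_E3 ? 1024 : 512 */
#define MAX_AGG_QS_E2		64	/* assumed ETH_MAX_AGGREGATION_QUEUES_E1H_E2 */
#define MAX_SPQ_PENDING		8	/* assumed */
#define FW_DROP_LEVEL		(3 + MAX_SPQ_PENDING + MAX_AGG_QS_E2)
#define DROPLESS_FC_HEADROOM	100

int main(void)
{
	/* BRB blocks the firmware keeps in reserve before pausing the peer */
	printf("FW_DROP_LEVEL = %d blocks out of %d\n", FW_DROP_LEVEL, BRB_SIZE_E2);
	/* high thresholds sit this many entries above the low ones */
	printf("DROPLESS_FC_HEADROOM = %d\n", DROPLESS_FC_HEADROOM);
	return 0;
}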
@@ -331,15 +345,35 @@ union db_prod { | |||
331 | /* SGE ring related macros */ | 345 | /* SGE ring related macros */ |
332 | #define NUM_RX_SGE_PAGES 2 | 346 | #define NUM_RX_SGE_PAGES 2 |
333 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) | 347 | #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) |
334 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) | 348 | #define NEXT_PAGE_SGE_DESC_CNT 2 |
349 | #define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT) | ||
335 | /* RX_SGE_CNT is promised to be a power of 2 */ | 350 | /* RX_SGE_CNT is promised to be a power of 2 */ |
336 | #define RX_SGE_MASK (RX_SGE_CNT - 1) | 351 | #define RX_SGE_MASK (RX_SGE_CNT - 1) |
337 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) | 352 | #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) |
338 | #define MAX_RX_SGE (NUM_RX_SGE - 1) | 353 | #define MAX_RX_SGE (NUM_RX_SGE - 1) |
339 | #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ | 354 | #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ |
340 | (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) | 355 | (MAX_RX_SGE_CNT - 1)) ? \ |
356 | (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \ | ||
357 | (x) + 1) | ||
341 | #define RX_SGE(x) ((x) & MAX_RX_SGE) | 358 | #define RX_SGE(x) ((x) & MAX_RX_SGE) |
342 | 359 | ||
360 | /* | ||
361 | * Number of required SGEs is the sum of two: | ||
362 | * 1. Number of possible opened aggregations (next packet for | ||
363 | * these aggregations will probably consume SGE immidiatelly) | ||
364 | * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only | ||
365 | * after placement on BD for new TPA aggregation) | ||
366 | * | ||
367 | * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page | ||
368 | */ | ||
369 | #define NUM_SGE_REQ (MAX_AGG_QS(bp) + \ | ||
370 | (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2) | ||
371 | #define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \ | ||
372 | MAX_RX_SGE_CNT) | ||
373 | #define SGE_TH_LO(bp) (NUM_SGE_REQ + \ | ||
374 | NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT) | ||
375 | #define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM) | ||
376 | |||
343 | /* Manipulate a bit vector defined as an array of u64 */ | 377 | /* Manipulate a bit vector defined as an array of u64 */ |
344 | 378 | ||
345 | /* Number of bits in one sge_mask array element */ | 379 | /* Number of bits in one sge_mask array element */ |
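Plugging non-E3 numbers into the SGE threshold macros just added gives the following; BRB_SIZE, NEXT_PAGE_SGE_DESC_CNT and DROPLESS_FC_HEADROOM come from the hunks above, while the 8-byte SGE descriptor and the 64 aggregation queues are assumptions.

#include <stdio.h>

int main(void)
{
	int brb_size = 512, max_agg_qs = 64, headroom = 100;
	int rx_sge_cnt = 4096 / 8;		/* SGEs per 4 KiB page (assumed 8-byte SGE) */
	int next_page_sge_desc_cnt = 2;		/* "next page" entries per page */
	int max_rx_sge_cnt = rx_sge_cnt - next_page_sge_desc_cnt;

	int num_sge_req = max_agg_qs + (brb_size - max_agg_qs) / 2;
	int num_sge_pg_req = (num_sge_req + max_rx_sge_cnt - 1) / max_rx_sge_cnt;
	int sge_th_lo = num_sge_req + num_sge_pg_req * next_page_sge_desc_cnt;
	int sge_th_hi = sge_th_lo + headroom;

	printf("NUM_SGE_REQ=%d SGE_TH_LO=%d SGE_TH_HI=%d\n",
	       num_sge_req, sge_th_lo, sge_th_hi);	/* 288 290 390 */
	return 0;
}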
@@ -551,24 +585,43 @@ struct bnx2x_fastpath { | |||
551 | 585 | ||
552 | #define NUM_TX_RINGS 16 | 586 | #define NUM_TX_RINGS 16 |
553 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) | 587 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) |
554 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) | 588 | #define NEXT_PAGE_TX_DESC_CNT 1 |
589 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT) | ||
555 | #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) | 590 | #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) |
556 | #define MAX_TX_BD (NUM_TX_BD - 1) | 591 | #define MAX_TX_BD (NUM_TX_BD - 1) |
557 | #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) | 592 | #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) |
558 | #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ | 593 | #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ |
559 | (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) | 594 | (MAX_TX_DESC_CNT - 1)) ? \ |
595 | (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \ | ||
596 | (x) + 1) | ||
560 | #define TX_BD(x) ((x) & MAX_TX_BD) | 597 | #define TX_BD(x) ((x) & MAX_TX_BD) |
561 | #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) | 598 | #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) |
562 | 599 | ||
563 | /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ | 600 | /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ |
564 | #define NUM_RX_RINGS 8 | 601 | #define NUM_RX_RINGS 8 |
565 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) | 602 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) |
566 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) | 603 | #define NEXT_PAGE_RX_DESC_CNT 2 |
604 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT) | ||
567 | #define RX_DESC_MASK (RX_DESC_CNT - 1) | 605 | #define RX_DESC_MASK (RX_DESC_CNT - 1) |
568 | #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) | 606 | #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) |
569 | #define MAX_RX_BD (NUM_RX_BD - 1) | 607 | #define MAX_RX_BD (NUM_RX_BD - 1) |
570 | #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) | 608 | #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) |
571 | #define MIN_RX_AVAIL 128 | 609 | |
610 | /* dropless fc calculations for BDs | ||
611 | * | ||
612 | * Number of BDs should as number of buffers in BRB: | ||
613 | * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT | ||
614 | * "next" elements on each page | ||
615 | */ | ||
616 | #define NUM_BD_REQ BRB_SIZE(bp) | ||
617 | #define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \ | ||
618 | MAX_RX_DESC_CNT) | ||
619 | #define BD_TH_LO(bp) (NUM_BD_REQ + \ | ||
620 | NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \ | ||
621 | FW_DROP_LEVEL(bp)) | ||
622 | #define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM) | ||
623 | |||
624 | #define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128) | ||
572 | 625 | ||
573 | #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ | 626 | #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ |
574 | ETH_MIN_RX_CQES_WITH_TPA_E1 : \ | 627 | ETH_MIN_RX_CQES_WITH_TPA_E1 : \ |
@@ -579,7 +632,9 @@ struct bnx2x_fastpath { | |||
579 | MIN_RX_AVAIL)) | 632 | MIN_RX_AVAIL)) |
580 | 633 | ||
581 | #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ | 634 | #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ |
582 | (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) | 635 | (MAX_RX_DESC_CNT - 1)) ? \ |
636 | (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \ | ||
637 | (x) + 1) | ||
583 | #define RX_BD(x) ((x) & MAX_RX_BD) | 638 | #define RX_BD(x) ((x) & MAX_RX_BD) |
584 | 639 | ||
585 | /* | 640 | /* |
@@ -589,14 +644,31 @@ struct bnx2x_fastpath { | |||
589 | #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) | 644 | #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) |
590 | #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) | 645 | #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) |
591 | #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) | 646 | #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) |
592 | #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) | 647 | #define NEXT_PAGE_RCQ_DESC_CNT 1 |
648 | #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT) | ||
593 | #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) | 649 | #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) |
594 | #define MAX_RCQ_BD (NUM_RCQ_BD - 1) | 650 | #define MAX_RCQ_BD (NUM_RCQ_BD - 1) |
595 | #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) | 651 | #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) |
596 | #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ | 652 | #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ |
597 | (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) | 653 | (MAX_RCQ_DESC_CNT - 1)) ? \ |
654 | (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \ | ||
655 | (x) + 1) | ||
598 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) | 656 | #define RCQ_BD(x) ((x) & MAX_RCQ_BD) |
599 | 657 | ||
658 | /* dropless fc calculations for RCQs | ||
659 | * | ||
660 | * Number of RCQs should be as number of buffers in BRB: | ||
661 | * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT | ||
662 | * "next" elements on each page | ||
663 | */ | ||
664 | #define NUM_RCQ_REQ BRB_SIZE(bp) | ||
665 | #define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \ | ||
666 | MAX_RCQ_DESC_CNT) | ||
667 | #define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \ | ||
668 | NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \ | ||
669 | FW_DROP_LEVEL(bp)) | ||
670 | #define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) | ||
671 | |||
600 | 672 | ||
601 | /* This is needed for determining of last_max */ | 673 | /* This is needed for determining of last_max */ |
602 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) | 674 | #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) |
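The NEXT_SGE_IDX, NEXT_TX_IDX, NEXT_RX_IDX and NEXT_RCQ_IDX macros reworked above all share one pattern: when the index reaches the last usable descriptor on a page it also jumps over the NEXT_PAGE_*_DESC_CNT "next page" entries. A small standalone demo for the RCQ case (128 CQEs per 4 KiB page is an assumption):

#include <stdio.h>

#define RCQ_DESC_CNT		128	/* assumed: 4 KiB page / 32-byte CQE */
#define NEXT_PAGE_RCQ_DESC_CNT	1
#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)

static unsigned int next_rcq_idx(unsigned int x)
{
	return ((x & MAX_RCQ_DESC_CNT) == (MAX_RCQ_DESC_CNT - 1)) ?
		x + 1 + NEXT_PAGE_RCQ_DESC_CNT : x + 1;
}

int main(void)
{
	/* 125 -> 126 -> 128: index 127, the "next page" descriptor, is skipped */
	printf("%u -> %u -> %u\n",
	       125u, next_rcq_idx(125), next_rcq_idx(next_rcq_idx(125)));
	return 0;
}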
@@ -685,24 +757,17 @@ struct bnx2x_fastpath { | |||
685 | #define FP_CSB_FUNC_OFF \ | 757 | #define FP_CSB_FUNC_OFF \ |
686 | offsetof(struct cstorm_status_block_c, func) | 758 | offsetof(struct cstorm_status_block_c, func) |
687 | 759 | ||
688 | #define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ | 760 | #define HC_INDEX_ETH_RX_CQ_CONS 1 |
689 | /* (HC_INDEX_U_TOE_RX_CQ_CONS) */ | ||
690 | #define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */ | ||
691 | /* (HC_INDEX_U_ETH_RX_CQ_CONS) */ | ||
692 | #define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */ | ||
693 | /* (HC_INDEX_U_ETH_RX_BD_CONS) */ | ||
694 | |||
695 | #define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */ | ||
696 | /* (HC_INDEX_C_TOE_TX_CQ_CONS) */ | ||
697 | #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */ | ||
698 | /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ | ||
699 | #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */ | ||
700 | /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ | ||
701 | #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */ | ||
702 | /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ | ||
703 | 761 | ||
704 | #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 | 762 | #define HC_INDEX_OOO_TX_CQ_CONS 4 |
763 | |||
764 | #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 | ||
765 | |||
766 | #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 | ||
705 | 767 | ||
768 | #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 | ||
769 | |||
770 | #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 | ||
706 | 771 | ||
707 | #define BNX2X_RX_SB_INDEX \ | 772 | #define BNX2X_RX_SB_INDEX \ |
708 | (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) | 773 | (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) |
@@ -1100,11 +1165,12 @@ struct bnx2x { | |||
1100 | #define BP_PORT(bp) (bp->pfid & 1) | 1165 | #define BP_PORT(bp) (bp->pfid & 1) |
1101 | #define BP_FUNC(bp) (bp->pfid) | 1166 | #define BP_FUNC(bp) (bp->pfid) |
1102 | #define BP_ABS_FUNC(bp) (bp->pf_num) | 1167 | #define BP_ABS_FUNC(bp) (bp->pf_num) |
1103 | #define BP_E1HVN(bp) (bp->pfid >> 1) | 1168 | #define BP_VN(bp) ((bp)->pfid >> 1) |
1104 | #define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ | 1169 | #define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4) |
1105 | #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) | 1170 | #define BP_L_ID(bp) (BP_VN(bp) << 2) |
1106 | #define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ | 1171 | #define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\ |
1107 | BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) | 1172 | (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) |
1173 | #define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) | ||
1108 | 1174 | ||
1109 | struct net_device *dev; | 1175 | struct net_device *dev; |
1110 | struct pci_dev *pdev; | 1176 | struct pci_dev *pdev; |
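A sketch of the arithmetic behind the new BP_FW_MB_IDX_VN(): port and vn are derived from pfid exactly as in the macros above, and the per-vn mailbox stride is 2 on E1x or 4-port devices and 1 otherwise. The pfid value below is just an example.

#include <stdio.h>

static int fw_mb_idx_vn(int port, int vn, int e1x_or_4port)
{
	return port + vn * (e1x_or_4port ? 2 : 1);
}

int main(void)
{
	int pfid = 5;
	int port = pfid & 1;	/* BP_PORT: 1 */
	int vn   = pfid >> 1;	/* BP_VN:   2 */

	printf("E1x / 4-port (stride 2): fw_mb idx %d\n", fw_mb_idx_vn(port, vn, 1));
	printf("E2 2-port    (stride 1): fw_mb idx %d\n", fw_mb_idx_vn(port, vn, 0));
	return 0;
}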
@@ -1767,7 +1833,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1767 | 1833 | ||
1768 | #define MAX_DMAE_C_PER_PORT 8 | 1834 | #define MAX_DMAE_C_PER_PORT 8 |
1769 | #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ | 1835 | #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ |
1770 | BP_E1HVN(bp)) | 1836 | BP_VN(bp)) |
1771 | #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ | 1837 | #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ |
1772 | E1HVN_MAX) | 1838 | E1HVN_MAX) |
1773 | 1839 | ||
@@ -1793,7 +1859,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1793 | 1859 | ||
1794 | /* must be used on a CID before placing it on a HW ring */ | 1860 | /* must be used on a CID before placing it on a HW ring */ |
1795 | #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ | 1861 | #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ |
1796 | (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ | 1862 | (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \ |
1797 | (x)) | 1863 | (x)) |
1798 | 1864 | ||
1799 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) | 1865 | #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) |
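To close out the bnx2x.h part: HW_CID() only sees the BP_E1HVN to BP_VN rename, the packed layout itself is unchanged. A sketch of that packing, with an assumed BNX2X_SWCID_SHIFT of 17 (the shift value is not shown in this diff):

#include <stdio.h>

#define SWCID_SHIFT 17	/* assumed BNX2X_SWCID_SHIFT */

static unsigned int hw_cid(unsigned int port, unsigned int vn, unsigned int cid)
{
	return (port << 23) | (vn << SWCID_SHIFT) | cid;
}

int main(void)
{
	/* port 1, vn 2, software CID 48 (the new BNX2X_ISCSI_ETH_CID) */
	printf("HW_CID = 0x%08x\n", hw_cid(1, 2, 48));
	return 0;
}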
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index d724a18b5285..c4cbf9736414 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -63,8 +63,9 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) | |||
63 | fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0); | 63 | fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0); |
64 | 64 | ||
65 | #ifdef BCM_CNIC | 65 | #ifdef BCM_CNIC |
66 | /* We don't want TPA on FCoE, FWD and OOO L2 rings */ | 66 | /* We don't want TPA on an FCoE L2 ring */ |
67 | bnx2x_fcoe(bp, disable_tpa) = 1; | 67 | if (IS_FCOE_FP(fp)) |
68 | fp->disable_tpa = 1; | ||
68 | #endif | 69 | #endif |
69 | } | 70 | } |
70 | 71 | ||
@@ -986,8 +987,6 @@ void __bnx2x_link_report(struct bnx2x *bp) | |||
986 | void bnx2x_init_rx_rings(struct bnx2x *bp) | 987 | void bnx2x_init_rx_rings(struct bnx2x *bp) |
987 | { | 988 | { |
988 | int func = BP_FUNC(bp); | 989 | int func = BP_FUNC(bp); |
989 | int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
990 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2; | ||
991 | u16 ring_prod; | 990 | u16 ring_prod; |
992 | int i, j; | 991 | int i, j; |
993 | 992 | ||
@@ -1000,7 +999,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
1000 | 999 | ||
1001 | if (!fp->disable_tpa) { | 1000 | if (!fp->disable_tpa) { |
1002 | /* Fill the per-aggregtion pool */ | 1001 | /* Fill the per-aggregtion pool */ |
1003 | for (i = 0; i < max_agg_queues; i++) { | 1002 | for (i = 0; i < MAX_AGG_QS(bp); i++) { |
1004 | struct bnx2x_agg_info *tpa_info = | 1003 | struct bnx2x_agg_info *tpa_info = |
1005 | &fp->tpa_info[i]; | 1004 | &fp->tpa_info[i]; |
1006 | struct sw_rx_bd *first_buf = | 1005 | struct sw_rx_bd *first_buf = |
@@ -1040,7 +1039,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
1040 | bnx2x_free_rx_sge_range(bp, fp, | 1039 | bnx2x_free_rx_sge_range(bp, fp, |
1041 | ring_prod); | 1040 | ring_prod); |
1042 | bnx2x_free_tpa_pool(bp, fp, | 1041 | bnx2x_free_tpa_pool(bp, fp, |
1043 | max_agg_queues); | 1042 | MAX_AGG_QS(bp)); |
1044 | fp->disable_tpa = 1; | 1043 | fp->disable_tpa = 1; |
1045 | ring_prod = 0; | 1044 | ring_prod = 0; |
1046 | break; | 1045 | break; |
@@ -1136,9 +1135,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) | |||
1136 | bnx2x_free_rx_bds(fp); | 1135 | bnx2x_free_rx_bds(fp); |
1137 | 1136 | ||
1138 | if (!fp->disable_tpa) | 1137 | if (!fp->disable_tpa) |
1139 | bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? | 1138 | bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); |
1140 | ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
1141 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2); | ||
1142 | } | 1139 | } |
1143 | } | 1140 | } |
1144 | 1141 | ||
@@ -1404,10 +1401,9 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) | |||
1404 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) | 1401 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) |
1405 | { | 1402 | { |
1406 | struct bnx2x *bp = netdev_priv(dev); | 1403 | struct bnx2x *bp = netdev_priv(dev); |
1404 | |||
1407 | #ifdef BCM_CNIC | 1405 | #ifdef BCM_CNIC |
1408 | if (NO_FCOE(bp)) | 1406 | if (!NO_FCOE(bp)) { |
1409 | return skb_tx_hash(dev, skb); | ||
1410 | else { | ||
1411 | struct ethhdr *hdr = (struct ethhdr *)skb->data; | 1407 | struct ethhdr *hdr = (struct ethhdr *)skb->data; |
1412 | u16 ether_type = ntohs(hdr->h_proto); | 1408 | u16 ether_type = ntohs(hdr->h_proto); |
1413 | 1409 | ||
@@ -1424,8 +1420,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
1424 | return bnx2x_fcoe_tx(bp, txq_index); | 1420 | return bnx2x_fcoe_tx(bp, txq_index); |
1425 | } | 1421 | } |
1426 | #endif | 1422 | #endif |
1427 | /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring | 1423 | /* select a non-FCoE queue */ |
1428 | */ | ||
1429 | return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); | 1424 | return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); |
1430 | } | 1425 | } |
1431 | 1426 | ||
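The rewritten bnx2x_select_queue() above now hashes only over the ETH L2 queues and short-circuits FCoE traffic to its dedicated ring. A condensed, standalone sketch of that flow (the ethertype constants are the standard FCoE/FIP values; the helper itself is not driver code):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define ETH_P_FCOE	0x8906
#define ETH_P_FIP	0x8914

static uint16_t select_queue(bool fcoe_enabled, uint16_t ether_type,
			     uint16_t fcoe_txq, uint16_t eth_hash_q)
{
	if (fcoe_enabled &&
	    (ether_type == ETH_P_FCOE || ether_type == ETH_P_FIP))
		return fcoe_txq;	/* dedicated FCoE L2 ring */

	return eth_hash_q;		/* __skb_tx_hash() over ETH queues only */
}

int main(void)
{
	printf("FCoE frame -> queue %u\n", select_queue(true, ETH_P_FCOE, 16, 3));
	printf("IPv4 frame -> queue %u\n", select_queue(true, 0x0800, 16, 3));
	return 0;
}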
@@ -1448,6 +1443,28 @@ void bnx2x_set_num_queues(struct bnx2x *bp) | |||
1448 | bp->num_queues += NON_ETH_CONTEXT_USE; | 1443 | bp->num_queues += NON_ETH_CONTEXT_USE; |
1449 | } | 1444 | } |
1450 | 1445 | ||
1446 | /** | ||
1447 | * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues | ||
1448 | * | ||
1449 | * @bp: Driver handle | ||
1450 | * | ||
1451 | * We currently support for at most 16 Tx queues for each CoS thus we will | ||
1452 | * allocate a multiple of 16 for ETH L2 rings according to the value of the | ||
1453 | * bp->max_cos. | ||
1454 | * | ||
1455 | * If there is an FCoE L2 queue the appropriate Tx queue will have the next | ||
1456 | * index after all ETH L2 indices. | ||
1457 | * | ||
1458 | * If the actual number of Tx queues (for each CoS) is less than 16 then there | ||
1459 | * will be the holes at the end of each group of 16 ETh L2 indices (0..15, | ||
1460 | * 16..31,...) with indicies that are not coupled with any real Tx queue. | ||
1461 | * | ||
1462 | * The proper configuration of skb->queue_mapping is handled by | ||
1463 | * bnx2x_select_queue() and __skb_tx_hash(). | ||
1464 | * | ||
1465 | * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() | ||
1466 | * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). | ||
1467 | */ | ||
1451 | static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) | 1468 | static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) |
1452 | { | 1469 | { |
1453 | int rc, tx, rx; | 1470 | int rc, tx, rx; |
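An illustrative reading of the queue layout the new kernel-doc describes: the group size of 16 per CoS comes from the comment, the concrete numbers below are invented.

#include <stdio.h>

#define QUEUES_PER_COS	16	/* per the comment above */

int main(void)
{
	int max_cos = 3, real_eth_queues = 6;	/* example configuration */

	for (int cos = 0; cos < max_cos; cos++)
		printf("CoS %d: tx indices %d..%d used, %d..%d are holes\n",
		       cos,
		       cos * QUEUES_PER_COS,
		       cos * QUEUES_PER_COS + real_eth_queues - 1,
		       cos * QUEUES_PER_COS + real_eth_queues,
		       (cos + 1) * QUEUES_PER_COS - 1);

	/* FCoE, if present, takes the next index after all ETH L2 indices */
	printf("FCoE tx index: %d\n", max_cos * QUEUES_PER_COS);
	return 0;
}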
@@ -3074,15 +3091,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) | |||
3074 | struct bnx2x_fastpath *fp = &bp->fp[index]; | 3091 | struct bnx2x_fastpath *fp = &bp->fp[index]; |
3075 | int ring_size = 0; | 3092 | int ring_size = 0; |
3076 | u8 cos; | 3093 | u8 cos; |
3094 | int rx_ring_size = 0; | ||
3077 | 3095 | ||
3078 | /* if rx_ring_size specified - use it */ | 3096 | /* if rx_ring_size specified - use it */ |
3079 | int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : | 3097 | if (!bp->rx_ring_size) { |
3080 | MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); | ||
3081 | 3098 | ||
3082 | /* allocate at least number of buffers required by FW */ | 3099 | rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); |
3083 | rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : | 3100 | |
3084 | MIN_RX_SIZE_TPA, | 3101 | /* allocate at least number of buffers required by FW */ |
3085 | rx_ring_size); | 3102 | rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : |
3103 | MIN_RX_SIZE_TPA, rx_ring_size); | ||
3104 | |||
3105 | bp->rx_ring_size = rx_ring_size; | ||
3106 | } else | ||
3107 | rx_ring_size = bp->rx_ring_size; | ||
3086 | 3108 | ||
3087 | /* Common */ | 3109 | /* Common */ |
3088 | sb = &bnx2x_fp(bp, index, status_blk); | 3110 | sb = &bnx2x_fp(bp, index, status_blk); |
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 223bfeebc597..2dc1199239d0 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -1297,7 +1297,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp, | |||
1297 | static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) | 1297 | static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) |
1298 | { | 1298 | { |
1299 | return bp->cnic_base_cl_id + cl_idx + | 1299 | return bp->cnic_base_cl_id + cl_idx + |
1300 | (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE; | 1300 | (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX; |
1301 | } | 1301 | } |
1302 | 1302 | ||
1303 | static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) | 1303 | static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) |
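The only change here is the stride: one function's CNIC client ids are now spaced by BNX2X_MAX_CNIC_ETH_CL_ID_IDX (2, per the new enum in bnx2x.h) rather than NON_ETH_CONTEXT_USE. A sketch with a made-up cnic_base_cl_id:

#include <stdio.h>

#define MAX_CNIC_ETH_CL_ID_IDX	2	/* from the new enum in bnx2x.h */

static int cnic_eth_cl_id(int cnic_base_cl_id, int cl_idx, int pf_num)
{
	return cnic_base_cl_id + cl_idx + (pf_num >> 1) * MAX_CNIC_ETH_CL_ID_IDX;
}

int main(void)
{
	/* cl_idx 0 = iSCSI, 1 = FCoE; base 17 is an arbitrary example */
	printf("pf 0: iSCSI %d, FCoE %d\n",
	       cnic_eth_cl_id(17, 0, 0), cnic_eth_cl_id(17, 1, 0));
	printf("pf 2: iSCSI %d, FCoE %d\n",
	       cnic_eth_cl_id(17, 0, 2), cnic_eth_cl_id(17, 1, 2));
	return 0;
}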
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index a4ea35f6a456..0b4acf67e0c6 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -920,7 +920,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, | |||
920 | 920 | ||
921 | void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) | 921 | void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) |
922 | { | 922 | { |
923 | if (!CHIP_IS_E1x(bp)) { | 923 | if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) { |
924 | bp->dcb_state = dcb_on; | 924 | bp->dcb_state = dcb_on; |
925 | bp->dcbx_enabled = dcbx_enabled; | 925 | bp->dcbx_enabled = dcbx_enabled; |
926 | } else { | 926 | } else { |
@@ -2120,6 +2120,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) | |||
2120 | break; | 2120 | break; |
2121 | case DCB_CAP_ATTR_DCBX: | 2121 | case DCB_CAP_ATTR_DCBX: |
2122 | *cap = BNX2X_DCBX_CAPS; | 2122 | *cap = BNX2X_DCBX_CAPS; |
2123 | break; | ||
2123 | default: | 2124 | default: |
2124 | rval = -EINVAL; | 2125 | rval = -EINVAL; |
2125 | break; | 2126 | break; |
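The second dcb.c hunk is a one-line fall-through fix; a minimal standalone illustration of the behaviour it corrects (the constants stand in for DCB_CAP_ATTR_DCBX and BNX2X_DCBX_CAPS):

#include <stdio.h>

static int get_cap(int capid, unsigned int *cap)
{
	int rval = 0;

	switch (capid) {
	case 1:			/* stands in for DCB_CAP_ATTR_DCBX */
		*cap = 0x5;	/* stands in for BNX2X_DCBX_CAPS */
		break;		/* without this break we fall into default */
	default:
		rval = -22;	/* -EINVAL */
		break;
	}
	return rval;
}

int main(void)
{
	unsigned int cap;
	printf("rval=%d cap=0x%x\n", get_cap(1, &cap), cap);
	return 0;
}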
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 221863059dae..cf3e47914dd7 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -363,13 +363,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
363 | } | 363 | } |
364 | 364 | ||
365 | /* advertise the requested speed and duplex if supported */ | 365 | /* advertise the requested speed and duplex if supported */ |
366 | cmd->advertising &= bp->port.supported[cfg_idx]; | 366 | if (cmd->advertising & ~(bp->port.supported[cfg_idx])) { |
367 | DP(NETIF_MSG_LINK, "Advertisement parameters " | ||
368 | "are not supported\n"); | ||
369 | return -EINVAL; | ||
370 | } | ||
367 | 371 | ||
368 | bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; | 372 | bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; |
369 | bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL; | 373 | bp->link_params.req_duplex[cfg_idx] = cmd->duplex; |
370 | bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg | | 374 | bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg | |
371 | cmd->advertising); | 375 | cmd->advertising); |
376 | if (cmd->advertising) { | ||
377 | |||
378 | bp->link_params.speed_cap_mask[cfg_idx] = 0; | ||
379 | if (cmd->advertising & ADVERTISED_10baseT_Half) { | ||
380 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
381 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF; | ||
382 | } | ||
383 | if (cmd->advertising & ADVERTISED_10baseT_Full) | ||
384 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
385 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL; | ||
372 | 386 | ||
387 | if (cmd->advertising & ADVERTISED_100baseT_Full) | ||
388 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
389 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL; | ||
390 | |||
391 | if (cmd->advertising & ADVERTISED_100baseT_Half) { | ||
392 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
393 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF; | ||
394 | } | ||
395 | if (cmd->advertising & ADVERTISED_1000baseT_Half) { | ||
396 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
397 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; | ||
398 | } | ||
399 | if (cmd->advertising & (ADVERTISED_1000baseT_Full | | ||
400 | ADVERTISED_1000baseKX_Full)) | ||
401 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
402 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; | ||
403 | |||
404 | if (cmd->advertising & (ADVERTISED_10000baseT_Full | | ||
405 | ADVERTISED_10000baseKX4_Full | | ||
406 | ADVERTISED_10000baseKR_Full)) | ||
407 | bp->link_params.speed_cap_mask[cfg_idx] |= | ||
408 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; | ||
409 | } | ||
373 | } else { /* forced speed */ | 410 | } else { /* forced speed */ |
374 | /* advertise the requested speed and duplex if supported */ | 411 | /* advertise the requested speed and duplex if supported */ |
375 | switch (speed) { | 412 | switch (speed) { |
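A condensed sketch of the new autoneg path above: the requested ADVERTISED_* bits are rejected if they fall outside the supported mask, otherwise they are translated into PORT_HW_CFG_SPEED_CAPABILITY_D0_* capability bits. All bit values below are placeholders; only the structure mirrors the hunk.

#include <stdio.h>
#include <stdint.h>

#define ADV_10_FULL	(1u << 0)
#define ADV_100_FULL	(1u << 1)
#define ADV_1000_FULL	(1u << 2)
#define ADV_10000_FULL	(1u << 3)

#define CAP_10M_FULL	(1u << 0)
#define CAP_100M_FULL	(1u << 1)
#define CAP_1G		(1u << 2)
#define CAP_10G		(1u << 3)

static int adv_to_cap_mask(uint32_t adv, uint32_t supported, uint32_t *cap)
{
	if (adv & ~supported)
		return -22;		/* -EINVAL, as in the hunk */

	*cap = 0;
	if (adv & ADV_10_FULL)
		*cap |= CAP_10M_FULL;
	if (adv & ADV_100_FULL)
		*cap |= CAP_100M_FULL;
	if (adv & ADV_1000_FULL)
		*cap |= CAP_1G;
	if (adv & ADV_10000_FULL)
		*cap |= CAP_10G;
	return 0;
}

int main(void)
{
	uint32_t cap;
	int rc = adv_to_cap_mask(ADV_1000_FULL | ADV_10000_FULL, ~0u, &cap);

	printf("rc=%d cap_mask=0x%x\n", rc, cap);
	return 0;
}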
@@ -1310,10 +1347,7 @@ static void bnx2x_get_ringparam(struct net_device *dev, | |||
1310 | if (bp->rx_ring_size) | 1347 | if (bp->rx_ring_size) |
1311 | ering->rx_pending = bp->rx_ring_size; | 1348 | ering->rx_pending = bp->rx_ring_size; |
1312 | else | 1349 | else |
1313 | if (bp->state == BNX2X_STATE_OPEN && bp->num_queues) | 1350 | ering->rx_pending = MAX_RX_AVAIL; |
1314 | ering->rx_pending = MAX_RX_AVAIL/bp->num_queues; | ||
1315 | else | ||
1316 | ering->rx_pending = MAX_RX_AVAIL; | ||
1317 | 1351 | ||
1318 | ering->rx_mini_pending = 0; | 1352 | ering->rx_mini_pending = 0; |
1319 | ering->rx_jumbo_pending = 0; | 1353 | ering->rx_jumbo_pending = 0; |
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index d45b1555a602..ba15bdc5a1a9 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp, | |||
778 | { | 778 | { |
779 | u32 nig_reg_adress_crd_weight = 0; | 779 | u32 nig_reg_adress_crd_weight = 0; |
780 | u32 pbf_reg_adress_crd_weight = 0; | 780 | u32 pbf_reg_adress_crd_weight = 0; |
781 | /* Calculate and set BW for this COS*/ | 781 | /* Calculate and set BW for this COS - use 1 instead of 0 for BW */ |
782 | const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw; | 782 | const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw; |
783 | const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw; | 783 | const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw; |
784 | 784 | ||
785 | switch (cos_entry) { | 785 | switch (cos_entry) { |
786 | case 0: | 786 | case 0: |
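A sketch of the weight change above: a CoS configured with 0% bandwidth now gets the minimum credit weight of one unit instead of zero (and, per the following hunk, no longer fails the whole ETS configuration). The min_w_val and total_bw numbers are arbitrary examples.

#include <stdio.h>

static unsigned int cos_credit_weight(unsigned int bw, unsigned int min_w_val,
				      unsigned int total_bw)
{
	return ((bw ? bw : 1) * min_w_val) / total_bw;
}

int main(void)
{
	unsigned int min_w_val = 100, total_bw = 100;

	printf("bw=0%%  -> weight %u\n", cos_credit_weight(0, min_w_val, total_bw));
	printf("bw=50%% -> weight %u\n", cos_credit_weight(50, min_w_val, total_bw));
	return 0;
}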
@@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw( | |||
852 | /* Calculate total BW requested */ | 852 | /* Calculate total BW requested */ |
853 | for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { | 853 | for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { |
854 | if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { | 854 | if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { |
855 | 855 | *total_bw += | |
856 | if (0 == ets_params->cos[cos_idx].params.bw_params.bw) { | 856 | ets_params->cos[cos_idx].params.bw_params.bw; |
857 | DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" | ||
858 | "was set to 0\n"); | ||
859 | return -EINVAL; | ||
860 | } | 857 | } |
861 | *total_bw += | ||
862 | ets_params->cos[cos_idx].params.bw_params.bw; | ||
863 | } | ||
864 | } | 858 | } |
865 | 859 | ||
866 | /*Check taotl BW is valid */ | 860 | /* Check total BW is valid */ |
867 | if ((100 != *total_bw) || (0 == *total_bw)) { | 861 | if ((100 != *total_bw) || (0 == *total_bw)) { |
868 | if (0 == *total_bw) { | 862 | if (0 == *total_bw) { |
869 | DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW" | 863 | DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW" |
@@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params, | |||
1726 | 1720 | ||
1727 | /* Check loopback mode */ | 1721 | /* Check loopback mode */ |
1728 | if (lb) | 1722 | if (lb) |
1729 | val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; | 1723 | val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK; |
1730 | REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); | 1724 | REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); |
1731 | bnx2x_set_xumac_nig(params, | 1725 | bnx2x_set_xumac_nig(params, |
1732 | ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); | 1726 | ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); |
@@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, | |||
3630 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, | 3624 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, |
3631 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); | 3625 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); |
3632 | 3626 | ||
3627 | /* Advertised and set FEC (Forward Error Correction) */ | ||
3628 | bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, | ||
3629 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, | ||
3630 | (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY | | ||
3631 | MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ)); | ||
3632 | |||
3633 | /* Enable CL37 BAM */ | 3633 | /* Enable CL37 BAM */ |
3634 | if (REG_RD(bp, params->shmem_base + | 3634 | if (REG_RD(bp, params->shmem_base + |
3635 | offsetof(struct shmem_region, dev_info. | 3635 | offsetof(struct shmem_region, dev_info. |
@@ -5924,7 +5924,7 @@ int bnx2x_set_led(struct link_params *params, | |||
5924 | (tmp | EMAC_LED_OVERRIDE)); | 5924 | (tmp | EMAC_LED_OVERRIDE)); |
5925 | /* | 5925 | /* |
5926 | * return here without enabling traffic | 5926 | * return here without enabling traffic |
5927 | * LED blink andsetting rate in ON mode. | 5927 | * LED blink and setting rate in ON mode. |
5928 | * In oper mode, enabling LED blink | 5928 | * In oper mode, enabling LED blink |
5929 | * and setting rate is needed. | 5929 | * and setting rate is needed. |
5930 | */ | 5930 | */ |
@@ -5936,7 +5936,11 @@ int bnx2x_set_led(struct link_params *params, | |||
5936 | * This is a work-around for HW issue found when link | 5936 | * This is a work-around for HW issue found when link |
5937 | * is up in CL73 | 5937 | * is up in CL73 |
5938 | */ | 5938 | */ |
5939 | REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); | 5939 | if ((!CHIP_IS_E3(bp)) || |
5940 | (CHIP_IS_E3(bp) && | ||
5941 | mode == LED_MODE_ON)) | ||
5942 | REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); | ||
5943 | |||
5940 | if (CHIP_IS_E1x(bp) || | 5944 | if (CHIP_IS_E1x(bp) || |
5941 | CHIP_IS_E2(bp) || | 5945 | CHIP_IS_E2(bp) || |
5942 | (mode == LED_MODE_ON)) | 5946 | (mode == LED_MODE_ON)) |
@@ -10638,8 +10642,7 @@ static struct bnx2x_phy phy_warpcore = { | |||
10638 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, | 10642 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, |
10639 | .addr = 0xff, | 10643 | .addr = 0xff, |
10640 | .def_md_devad = 0, | 10644 | .def_md_devad = 0, |
10641 | .flags = (FLAGS_HW_LOCK_REQUIRED | | 10645 | .flags = FLAGS_HW_LOCK_REQUIRED, |
10642 | FLAGS_TX_ERROR_CHECK), | ||
10643 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10646 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10644 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10647 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10645 | .mdio_ctrl = 0, | 10648 | .mdio_ctrl = 0, |
@@ -10765,8 +10768,7 @@ static struct bnx2x_phy phy_8706 = { | |||
10765 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, | 10768 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, |
10766 | .addr = 0xff, | 10769 | .addr = 0xff, |
10767 | .def_md_devad = 0, | 10770 | .def_md_devad = 0, |
10768 | .flags = (FLAGS_INIT_XGXS_FIRST | | 10771 | .flags = FLAGS_INIT_XGXS_FIRST, |
10769 | FLAGS_TX_ERROR_CHECK), | ||
10770 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10772 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10771 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10773 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10772 | .mdio_ctrl = 0, | 10774 | .mdio_ctrl = 0, |
@@ -10797,8 +10799,7 @@ static struct bnx2x_phy phy_8726 = { | |||
10797 | .addr = 0xff, | 10799 | .addr = 0xff, |
10798 | .def_md_devad = 0, | 10800 | .def_md_devad = 0, |
10799 | .flags = (FLAGS_HW_LOCK_REQUIRED | | 10801 | .flags = (FLAGS_HW_LOCK_REQUIRED | |
10800 | FLAGS_INIT_XGXS_FIRST | | 10802 | FLAGS_INIT_XGXS_FIRST), |
10801 | FLAGS_TX_ERROR_CHECK), | ||
10802 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10803 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10803 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10804 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10804 | .mdio_ctrl = 0, | 10805 | .mdio_ctrl = 0, |
@@ -10829,8 +10830,7 @@ static struct bnx2x_phy phy_8727 = { | |||
10829 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, | 10830 | .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, |
10830 | .addr = 0xff, | 10831 | .addr = 0xff, |
10831 | .def_md_devad = 0, | 10832 | .def_md_devad = 0, |
10832 | .flags = (FLAGS_FAN_FAILURE_DET_REQ | | 10833 | .flags = FLAGS_FAN_FAILURE_DET_REQ, |
10833 | FLAGS_TX_ERROR_CHECK), | ||
10834 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10834 | .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10835 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, | 10835 | .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, |
10836 | .mdio_ctrl = 0, | 10836 | .mdio_ctrl = 0, |
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 150709111548..15f800085bb2 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -407,8 +407,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, | |||
407 | opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); | 407 | opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); |
408 | 408 | ||
409 | opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); | 409 | opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); |
410 | opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | | 410 | opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) | |
411 | (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); | 411 | (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); |
412 | opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); | 412 | opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); |
413 | 413 | ||
414 | #ifdef __BIG_ENDIAN | 414 | #ifdef __BIG_ENDIAN |
@@ -1419,7 +1419,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) | |||
1419 | if (!CHIP_IS_E1(bp)) { | 1419 | if (!CHIP_IS_E1(bp)) { |
1420 | /* init leading/trailing edge */ | 1420 | /* init leading/trailing edge */ |
1421 | if (IS_MF(bp)) { | 1421 | if (IS_MF(bp)) { |
1422 | val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); | 1422 | val = (0xee0f | (1 << (BP_VN(bp) + 4))); |
1423 | if (bp->port.pmf) | 1423 | if (bp->port.pmf) |
1424 | /* enable nig and gpio3 attention */ | 1424 | /* enable nig and gpio3 attention */ |
1425 | val |= 0x1100; | 1425 | val |= 0x1100; |
@@ -1471,7 +1471,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp) | |||
1471 | 1471 | ||
1472 | /* init leading/trailing edge */ | 1472 | /* init leading/trailing edge */ |
1473 | if (IS_MF(bp)) { | 1473 | if (IS_MF(bp)) { |
1474 | val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); | 1474 | val = (0xee0f | (1 << (BP_VN(bp) + 4))); |
1475 | if (bp->port.pmf) | 1475 | if (bp->port.pmf) |
1476 | /* enable nig and gpio3 attention */ | 1476 | /* enable nig and gpio3 attention */ |
1477 | val |= 0x1100; | 1477 | val |= 0x1100; |
@@ -2287,7 +2287,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) | |||
2287 | int vn; | 2287 | int vn; |
2288 | 2288 | ||
2289 | bp->vn_weight_sum = 0; | 2289 | bp->vn_weight_sum = 0; |
2290 | for (vn = VN_0; vn < E1HVN_MAX; vn++) { | 2290 | for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { |
2291 | u32 vn_cfg = bp->mf_config[vn]; | 2291 | u32 vn_cfg = bp->mf_config[vn]; |
2292 | u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> | 2292 | u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> |
2293 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; | 2293 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; |
@@ -2320,12 +2320,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) | |||
2320 | CMNG_FLAGS_PER_PORT_FAIRNESS_VN; | 2320 | CMNG_FLAGS_PER_PORT_FAIRNESS_VN; |
2321 | } | 2321 | } |
2322 | 2322 | ||
2323 | /* returns func by VN for current port */ | ||
2324 | static inline int func_by_vn(struct bnx2x *bp, int vn) | ||
2325 | { | ||
2326 | return 2 * vn + BP_PORT(bp); | ||
2327 | } | ||
2328 | |||
2323 | static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) | 2329 | static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) |
2324 | { | 2330 | { |
2325 | struct rate_shaping_vars_per_vn m_rs_vn; | 2331 | struct rate_shaping_vars_per_vn m_rs_vn; |
2326 | struct fairness_vars_per_vn m_fair_vn; | 2332 | struct fairness_vars_per_vn m_fair_vn; |
2327 | u32 vn_cfg = bp->mf_config[vn]; | 2333 | u32 vn_cfg = bp->mf_config[vn]; |
2328 | int func = 2*vn + BP_PORT(bp); | 2334 | int func = func_by_vn(bp, vn); |
2329 | u16 vn_min_rate, vn_max_rate; | 2335 | u16 vn_min_rate, vn_max_rate; |
2330 | int i; | 2336 | int i; |
2331 | 2337 | ||
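The new func_by_vn() helper simply interleaves vn and port into the per-port function number; a standalone sketch:

#include <stdio.h>

static int func_by_vn(int port, int vn)
{
	return 2 * vn + port;
}

int main(void)
{
	for (int vn = 0; vn < 4; vn++)
		printf("vn %d -> func %d (port 0), func %d (port 1)\n",
		       vn, func_by_vn(0, vn), func_by_vn(1, vn));
	return 0;
}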
@@ -2422,7 +2428,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp) | |||
2422 | * | 2428 | * |
2423 | * and there are 2 functions per port | 2429 | * and there are 2 functions per port |
2424 | */ | 2430 | */ |
2425 | for (vn = VN_0; vn < E1HVN_MAX; vn++) { | 2431 | for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { |
2426 | int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); | 2432 | int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); |
2427 | 2433 | ||
2428 | if (func >= E1H_FUNC_MAX) | 2434 | if (func >= E1H_FUNC_MAX) |
@@ -2454,7 +2460,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) | |||
2454 | 2460 | ||
2455 | /* calculate and set min-max rate for each vn */ | 2461 | /* calculate and set min-max rate for each vn */ |
2456 | if (bp->port.pmf) | 2462 | if (bp->port.pmf) |
2457 | for (vn = VN_0; vn < E1HVN_MAX; vn++) | 2463 | for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) |
2458 | bnx2x_init_vn_minmax(bp, vn); | 2464 | bnx2x_init_vn_minmax(bp, vn); |
2459 | 2465 | ||
2460 | /* always enable rate shaping and fairness */ | 2466 | /* always enable rate shaping and fairness */ |
@@ -2473,16 +2479,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) | |||
2473 | 2479 | ||
2474 | static inline void bnx2x_link_sync_notify(struct bnx2x *bp) | 2480 | static inline void bnx2x_link_sync_notify(struct bnx2x *bp) |
2475 | { | 2481 | { |
2476 | int port = BP_PORT(bp); | ||
2477 | int func; | 2482 | int func; |
2478 | int vn; | 2483 | int vn; |
2479 | 2484 | ||
2480 | /* Set the attention towards other drivers on the same port */ | 2485 | /* Set the attention towards other drivers on the same port */ |
2481 | for (vn = VN_0; vn < E1HVN_MAX; vn++) { | 2486 | for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { |
2482 | if (vn == BP_E1HVN(bp)) | 2487 | if (vn == BP_VN(bp)) |
2483 | continue; | 2488 | continue; |
2484 | 2489 | ||
2485 | func = ((vn << 1) | port); | 2490 | func = func_by_vn(bp, vn); |
2486 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + | 2491 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + |
2487 | (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); | 2492 | (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); |
2488 | } | 2493 | } |
@@ -2577,7 +2582,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp) | |||
2577 | bnx2x_dcbx_pmf_update(bp); | 2582 | bnx2x_dcbx_pmf_update(bp); |
2578 | 2583 | ||
2579 | /* enable nig attention */ | 2584 | /* enable nig attention */ |
2580 | val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); | 2585 | val = (0xff0f | (1 << (BP_VN(bp) + 4))); |
2581 | if (bp->common.int_block == INT_BLOCK_HC) { | 2586 | if (bp->common.int_block == INT_BLOCK_HC) { |
2582 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); | 2587 | REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); |
2583 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); | 2588 | REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); |
@@ -2756,8 +2761,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, | |||
2756 | u16 tpa_agg_size = 0; | 2761 | u16 tpa_agg_size = 0; |
2757 | 2762 | ||
2758 | if (!fp->disable_tpa) { | 2763 | if (!fp->disable_tpa) { |
2759 | pause->sge_th_hi = 250; | 2764 | pause->sge_th_lo = SGE_TH_LO(bp); |
2760 | pause->sge_th_lo = 150; | 2765 | pause->sge_th_hi = SGE_TH_HI(bp); |
2766 | |||
2767 | /* validate SGE ring has enough to cross high threshold */ | ||
2768 | WARN_ON(bp->dropless_fc && | ||
2769 | pause->sge_th_hi + FW_PREFETCH_CNT > | ||
2770 | MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); | ||
2771 | |||
2761 | tpa_agg_size = min_t(u32, | 2772 | tpa_agg_size = min_t(u32, |
2762 | (min_t(u32, 8, MAX_SKB_FRAGS) * | 2773 | (min_t(u32, 8, MAX_SKB_FRAGS) * |
2763 | SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); | 2774 | SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); |
@@ -2771,10 +2782,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, | |||
2771 | 2782 | ||
2772 | /* pause - not for e1 */ | 2783 | /* pause - not for e1 */ |
2773 | if (!CHIP_IS_E1(bp)) { | 2784 | if (!CHIP_IS_E1(bp)) { |
2774 | pause->bd_th_hi = 350; | 2785 | pause->bd_th_lo = BD_TH_LO(bp); |
2775 | pause->bd_th_lo = 250; | 2786 | pause->bd_th_hi = BD_TH_HI(bp); |
2776 | pause->rcq_th_hi = 350; | 2787 | |
2777 | pause->rcq_th_lo = 250; | 2788 | pause->rcq_th_lo = RCQ_TH_LO(bp); |
2789 | pause->rcq_th_hi = RCQ_TH_HI(bp); | ||
2790 | /* | ||
2791 | * validate that rings have enough entries to cross | ||
2792 | * high thresholds | ||
2793 | */ | ||
2794 | WARN_ON(bp->dropless_fc && | ||
2795 | pause->bd_th_hi + FW_PREFETCH_CNT > | ||
2796 | bp->rx_ring_size); | ||
2797 | WARN_ON(bp->dropless_fc && | ||
2798 | pause->rcq_th_hi + FW_PREFETCH_CNT > | ||
2799 | NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); | ||
2778 | 2800 | ||
2779 | pause->pri_map = 1; | 2801 | pause->pri_map = 1; |
2780 | } | 2802 | } |
@@ -2802,9 +2824,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, | |||
2802 | * For PF Clients it should be the maximum avaliable number. | 2824 | * For PF Clients it should be the maximum avaliable number. |
2803 | * VF driver(s) may want to define it to a smaller value. | 2825 | * VF driver(s) may want to define it to a smaller value. |
2804 | */ | 2826 | */ |
2805 | rxq_init->max_tpa_queues = | 2827 | rxq_init->max_tpa_queues = MAX_AGG_QS(bp); |
2806 | (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : | ||
2807 | ETH_MAX_AGGREGATION_QUEUES_E1H_E2); | ||
2808 | 2828 | ||
2809 | rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; | 2829 | rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; |
2810 | rxq_init->fw_sb_id = fp->fw_sb_id; | 2830 | rxq_init->fw_sb_id = fp->fw_sb_id; |
@@ -4808,6 +4828,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, | |||
4808 | hc_sm->time_to_expire = 0xFFFFFFFF; | 4828 | hc_sm->time_to_expire = 0xFFFFFFFF; |
4809 | } | 4829 | } |
4810 | 4830 | ||
4831 | |||
4832 | /* allocates state machine ids. */ | ||
4833 | static inline | ||
4834 | void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) | ||
4835 | { | ||
4836 | /* zero out state machine indices */ | ||
4837 | /* rx indices */ | ||
4838 | index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4839 | |||
4840 | /* tx indices */ | ||
4841 | index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4842 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4843 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4844 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; | ||
4845 | |||
4846 | /* map indices */ | ||
4847 | /* rx indices */ | ||
4848 | index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= | ||
4849 | SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4850 | |||
4851 | /* tx indices */ | ||
4852 | index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= | ||
4853 | SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4854 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= | ||
4855 | SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4856 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= | ||
4857 | SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4858 | index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= | ||
4859 | SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; | ||
4860 | } | ||
4861 | |||
4811 | static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | 4862 | static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, |
4812 | u8 vf_valid, int fw_sb_id, int igu_sb_id) | 4863 | u8 vf_valid, int fw_sb_id, int igu_sb_id) |
4813 | { | 4864 | { |
@@ -4839,6 +4890,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | |||
4839 | hc_sm_p = sb_data_e2.common.state_machine; | 4890 | hc_sm_p = sb_data_e2.common.state_machine; |
4840 | sb_data_p = (u32 *)&sb_data_e2; | 4891 | sb_data_p = (u32 *)&sb_data_e2; |
4841 | data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); | 4892 | data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); |
4893 | bnx2x_map_sb_state_machines(sb_data_e2.index_data); | ||
4842 | } else { | 4894 | } else { |
4843 | memset(&sb_data_e1x, 0, | 4895 | memset(&sb_data_e1x, 0, |
4844 | sizeof(struct hc_status_block_data_e1x)); | 4896 | sizeof(struct hc_status_block_data_e1x)); |
@@ -4853,6 +4905,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, | |||
4853 | hc_sm_p = sb_data_e1x.common.state_machine; | 4905 | hc_sm_p = sb_data_e1x.common.state_machine; |
4854 | sb_data_p = (u32 *)&sb_data_e1x; | 4906 | sb_data_p = (u32 *)&sb_data_e1x; |
4855 | data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); | 4907 | data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); |
4908 | bnx2x_map_sb_state_machines(sb_data_e1x.index_data); | ||
4856 | } | 4909 | } |
4857 | 4910 | ||
4858 | bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], | 4911 | bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], |
@@ -4890,7 +4943,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4890 | int igu_seg_id; | 4943 | int igu_seg_id; |
4891 | int port = BP_PORT(bp); | 4944 | int port = BP_PORT(bp); |
4892 | int func = BP_FUNC(bp); | 4945 | int func = BP_FUNC(bp); |
4893 | int reg_offset; | 4946 | int reg_offset, reg_offset_en5; |
4894 | u64 section; | 4947 | u64 section; |
4895 | int index; | 4948 | int index; |
4896 | struct hc_sp_status_block_data sp_sb_data; | 4949 | struct hc_sp_status_block_data sp_sb_data; |
@@ -4913,6 +4966,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4913 | 4966 | ||
4914 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 4967 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
4915 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 4968 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
4969 | reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : | ||
4970 | MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); | ||
4916 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { | 4971 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { |
4917 | int sindex; | 4972 | int sindex; |
4918 | /* take care of sig[0]..sig[4] */ | 4973 | /* take care of sig[0]..sig[4] */ |
@@ -4927,7 +4982,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4927 | * and not 16 between the different groups | 4982 | * and not 16 between the different groups |
4928 | */ | 4983 | */ |
4929 | bp->attn_group[index].sig[4] = REG_RD(bp, | 4984 | bp->attn_group[index].sig[4] = REG_RD(bp, |
4930 | reg_offset + 0x10 + 0x4*index); | 4985 | reg_offset_en5 + 0x4*index); |
4931 | else | 4986 | else |
4932 | bp->attn_group[index].sig[4] = 0; | 4987 | bp->attn_group[index].sig[4] = 0; |
4933 | } | 4988 | } |
@@ -5798,6 +5853,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) | |||
5798 | 5853 | ||
5799 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); | 5854 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); |
5800 | 5855 | ||
5856 | /* | ||
5857 | * take the UNDI lock to protect undi_unload flow from accessing | ||
5858 | * registers while we're resetting the chip | ||
5859 | */ | ||
5860 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
5861 | |||
5801 | bnx2x_reset_common(bp); | 5862 | bnx2x_reset_common(bp); |
5802 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); | 5863 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); |
5803 | 5864 | ||
@@ -5808,6 +5869,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) | |||
5808 | } | 5869 | } |
5809 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); | 5870 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); |
5810 | 5871 | ||
5872 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
5873 | |||
5811 | bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); | 5874 | bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); |
5812 | 5875 | ||
5813 | if (!CHIP_IS_E1x(bp)) { | 5876 | if (!CHIP_IS_E1x(bp)) { |
@@ -6663,12 +6726,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) | |||
6663 | if (CHIP_MODE_IS_4_PORT(bp)) | 6726 | if (CHIP_MODE_IS_4_PORT(bp)) |
6664 | dsb_idx = BP_FUNC(bp); | 6727 | dsb_idx = BP_FUNC(bp); |
6665 | else | 6728 | else |
6666 | dsb_idx = BP_E1HVN(bp); | 6729 | dsb_idx = BP_VN(bp); |
6667 | 6730 | ||
6668 | prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? | 6731 | prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? |
6669 | IGU_BC_BASE_DSB_PROD + dsb_idx : | 6732 | IGU_BC_BASE_DSB_PROD + dsb_idx : |
6670 | IGU_NORM_BASE_DSB_PROD + dsb_idx); | 6733 | IGU_NORM_BASE_DSB_PROD + dsb_idx); |
6671 | 6734 | ||
6735 | /* | ||
6736 | * igu prods come in chunks of E1HVN_MAX (4) - | ||
6737 | * does not matters what is the current chip mode | ||
6738 | */ | ||
6672 | for (i = 0; i < (num_segs * E1HVN_MAX); | 6739 | for (i = 0; i < (num_segs * E1HVN_MAX); |
6673 | i += E1HVN_MAX) { | 6740 | i += E1HVN_MAX) { |
6674 | addr = IGU_REG_PROD_CONS_MEMORY + | 6741 | addr = IGU_REG_PROD_CONS_MEMORY + |
@@ -7560,9 +7627,12 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | |||
7560 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | 7627 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
7561 | u8 *mac_addr = bp->dev->dev_addr; | 7628 | u8 *mac_addr = bp->dev->dev_addr; |
7562 | u32 val; | 7629 | u32 val; |
7630 | u16 pmc; | ||
7631 | |||
7563 | /* The mac address is written to entries 1-4 to | 7632 | /* The mac address is written to entries 1-4 to |
7564 | preserve entry 0 which is used by the PMF */ | 7633 | * preserve entry 0 which is used by the PMF |
7565 | u8 entry = (BP_E1HVN(bp) + 1)*8; | 7634 | */ |
7635 | u8 entry = (BP_VN(bp) + 1)*8; | ||
7566 | 7636 | ||
7567 | val = (mac_addr[0] << 8) | mac_addr[1]; | 7637 | val = (mac_addr[0] << 8) | mac_addr[1]; |
7568 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); | 7638 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); |
@@ -7571,6 +7641,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | |||
7571 | (mac_addr[4] << 8) | mac_addr[5]; | 7641 | (mac_addr[4] << 8) | mac_addr[5]; |
7572 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); | 7642 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); |
7573 | 7643 | ||
7644 | /* Enable the PME and clear the status */ | ||
7645 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); | ||
7646 | pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; | ||
7647 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); | ||
7648 | |||
7574 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; | 7649 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; |
7575 | 7650 | ||
7576 | } else | 7651 | } else |
@@ -8538,10 +8613,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
8538 | /* Check if there is any driver already loaded */ | 8613 | /* Check if there is any driver already loaded */ |
8539 | val = REG_RD(bp, MISC_REG_UNPREPARED); | 8614 | val = REG_RD(bp, MISC_REG_UNPREPARED); |
8540 | if (val == 0x1) { | 8615 | if (val == 0x1) { |
8541 | /* Check if it is the UNDI driver | 8616 | |
8617 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
8618 | /* | ||
8619 | * Check if it is the UNDI driver | ||
8542 | * UNDI driver initializes CID offset for normal bell to 0x7 | 8620 | * UNDI driver initializes CID offset for normal bell to 0x7 |
8543 | */ | 8621 | */ |
8544 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | ||
8545 | val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); | 8622 | val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); |
8546 | if (val == 0x7) { | 8623 | if (val == 0x7) { |
8547 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 8624 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
@@ -8579,9 +8656,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
8579 | bnx2x_fw_command(bp, reset_code, 0); | 8656 | bnx2x_fw_command(bp, reset_code, 0); |
8580 | } | 8657 | } |
8581 | 8658 | ||
8582 | /* now it's safe to release the lock */ | ||
8583 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | ||
8584 | |||
8585 | bnx2x_undi_int_disable(bp); | 8659 | bnx2x_undi_int_disable(bp); |
8586 | port = BP_PORT(bp); | 8660 | port = BP_PORT(bp); |
8587 | 8661 | ||
@@ -8631,8 +8705,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
8631 | bp->fw_seq = | 8705 | bp->fw_seq = |
8632 | (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & | 8706 | (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & |
8633 | DRV_MSG_SEQ_NUMBER_MASK); | 8707 | DRV_MSG_SEQ_NUMBER_MASK); |
8634 | } else | 8708 | } |
8635 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | 8709 | |
8710 | /* now it's safe to release the lock */ | ||
8711 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
8636 | } | 8712 | } |
8637 | } | 8713 | } |
8638 | 8714 | ||
@@ -8769,13 +8845,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
8769 | static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) | 8845 | static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) |
8770 | { | 8846 | { |
8771 | int pfid = BP_FUNC(bp); | 8847 | int pfid = BP_FUNC(bp); |
8772 | int vn = BP_E1HVN(bp); | ||
8773 | int igu_sb_id; | 8848 | int igu_sb_id; |
8774 | u32 val; | 8849 | u32 val; |
8775 | u8 fid, igu_sb_cnt = 0; | 8850 | u8 fid, igu_sb_cnt = 0; |
8776 | 8851 | ||
8777 | bp->igu_base_sb = 0xff; | 8852 | bp->igu_base_sb = 0xff; |
8778 | if (CHIP_INT_MODE_IS_BC(bp)) { | 8853 | if (CHIP_INT_MODE_IS_BC(bp)) { |
8854 | int vn = BP_VN(bp); | ||
8779 | igu_sb_cnt = bp->igu_sb_cnt; | 8855 | igu_sb_cnt = bp->igu_sb_cnt; |
8780 | bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * | 8856 | bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * |
8781 | FP_SB_MAX_E1x; | 8857 | FP_SB_MAX_E1x; |
@@ -9408,6 +9484,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
9408 | bp->igu_base_sb = 0; | 9484 | bp->igu_base_sb = 0; |
9409 | } else { | 9485 | } else { |
9410 | bp->common.int_block = INT_BLOCK_IGU; | 9486 | bp->common.int_block = INT_BLOCK_IGU; |
9487 | |||
9488 | /* do not allow device reset during IGU info preocessing */ | ||
9489 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
9490 | |||
9411 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); | 9491 | val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); |
9412 | 9492 | ||
9413 | if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { | 9493 | if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { |
@@ -9439,6 +9519,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
9439 | 9519 | ||
9440 | bnx2x_get_igu_cam_info(bp); | 9520 | bnx2x_get_igu_cam_info(bp); |
9441 | 9521 | ||
9522 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); | ||
9442 | } | 9523 | } |
9443 | 9524 | ||
9444 | /* | 9525 | /* |
@@ -9465,7 +9546,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
9465 | 9546 | ||
9466 | bp->mf_ov = 0; | 9547 | bp->mf_ov = 0; |
9467 | bp->mf_mode = 0; | 9548 | bp->mf_mode = 0; |
9468 | vn = BP_E1HVN(bp); | 9549 | vn = BP_VN(bp); |
9469 | 9550 | ||
9470 | if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { | 9551 | if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { |
9471 | BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", | 9552 | BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", |
@@ -9585,13 +9666,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) | |||
9585 | /* port info */ | 9666 | /* port info */ |
9586 | bnx2x_get_port_hwinfo(bp); | 9667 | bnx2x_get_port_hwinfo(bp); |
9587 | 9668 | ||
9588 | if (!BP_NOMCP(bp)) { | ||
9589 | bp->fw_seq = | ||
9590 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & | ||
9591 | DRV_MSG_SEQ_NUMBER_MASK); | ||
9592 | BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); | ||
9593 | } | ||
9594 | |||
9595 | /* Get MAC addresses */ | 9669 | /* Get MAC addresses */ |
9596 | bnx2x_get_mac_hwinfo(bp); | 9670 | bnx2x_get_mac_hwinfo(bp); |
9597 | 9671 | ||
@@ -9757,6 +9831,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
9757 | if (!BP_NOMCP(bp)) | 9831 | if (!BP_NOMCP(bp)) |
9758 | bnx2x_undi_unload(bp); | 9832 | bnx2x_undi_unload(bp); |
9759 | 9833 | ||
9834 | /* init fw_seq after undi_unload! */ | ||
9835 | if (!BP_NOMCP(bp)) { | ||
9836 | bp->fw_seq = | ||
9837 | (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & | ||
9838 | DRV_MSG_SEQ_NUMBER_MASK); | ||
9839 | BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); | ||
9840 | } | ||
9841 | |||
9760 | if (CHIP_REV_IS_FPGA(bp)) | 9842 | if (CHIP_REV_IS_FPGA(bp)) |
9761 | dev_err(&bp->pdev->dev, "FPGA detected\n"); | 9843 | dev_err(&bp->pdev->dev, "FPGA detected\n"); |
9762 | 9844 | ||
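The added block initializes bp->fw_seq only after bnx2x_undi_unload() has run (the unload flow itself exchanges mailbox commands and advances the sequence), by masking the driver mailbox header with DRV_MSG_SEQ_NUMBER_MASK. A minimal sketch of that extraction, with an assumed mask value and a made-up header, just to show the masking step:

/* Sketch of the fw_seq extraction done after bnx2x_undi_unload() in the
 * hunk above.  The mask value is assumed for illustration; the real
 * DRV_MSG_SEQ_NUMBER_MASK is defined elsewhere in the driver headers. */
#include <stdint.h>
#include <stdio.h>

#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff	/* assumed */

static uint32_t fw_seq_from_mb_header(uint32_t drv_mb_header)
{
	return drv_mb_header & DRV_MSG_SEQ_NUMBER_MASK;
}

int main(void)
{
	uint32_t header = 0x10030005;	/* made-up mailbox header */

	printf("fw_seq 0x%08x\n", fw_seq_from_mb_header(header));
	return 0;
}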
@@ -10251,10 +10333,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | |||
10251 | /* clean indirect addresses */ | 10333 | /* clean indirect addresses */ |
10252 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, | 10334 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, |
10253 | PCICFG_VENDOR_ID_OFFSET); | 10335 | PCICFG_VENDOR_ID_OFFSET); |
10254 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0); | 10336 | /* |
10255 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0); | 10337 | * Clean the following indirect addresses for all functions since they |
10256 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); | 10338 | * are not used by the driver. |
10257 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); | 10339 | */ |
10340 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); | ||
10341 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); | ||
10342 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); | ||
10343 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); | ||
10344 | |||
10345 | if (CHIP_IS_E1x(bp)) { | ||
10346 | REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); | ||
10347 | REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); | ||
10348 | REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); | ||
10349 | REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); | ||
10350 | } | ||
10258 | 10351 | ||
10259 | /* | 10352 | /* |
10260 | * Enable internal target-read (in case we are probed after PF FLR). | 10353 | * Enable internal target-read (in case we are probed after PF FLR). |
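The last hunk for bnx2x_main.c stops deriving the PGL address registers from BP_PORT(bp)*16 and instead clears the function-0 set unconditionally, plus the function-1 set on E1x chips. A sketch of that cleanup loop, using the PXP2_REG_PGL_ADDR_* offsets defined in the bnx2x_reg.h hunk further below and a hypothetical reg_wr() stand-in for REG_WR():

/* Sketch of the indirect-address cleanup from the hunk above.  The
 * offsets come from the PXP2_REG_PGL_ADDR_* definitions added in the
 * bnx2x_reg.h hunk below; reg_wr() is a hypothetical stand-in for
 * REG_WR() so this compiles outside the driver. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void reg_wr(uint32_t offset, uint32_t val)
{
	printf("REG_WR 0x%06x <- 0x%08x\n", offset, val);
}

static void clean_pgl_indirect_addrs(bool chip_is_e1x)
{
	static const uint32_t f0[] = { 0x120534, 0x120538, 0x12053c, 0x120540 };
	static const uint32_t f1[] = { 0x120544, 0x120548, 0x12054c, 0x120550 };
	size_t i;

	for (i = 0; i < sizeof(f0) / sizeof(f0[0]); i++)
		reg_wr(f0[i], 0);		/* always clean function 0 */

	if (chip_is_e1x)
		for (i = 0; i < sizeof(f1) / sizeof(f1[0]); i++)
			reg_wr(f1[i], 0);	/* E1x: clean function 1 too */
}

int main(void)
{
	clean_pgl_indirect_addrs(true);
	return 0;
}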
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 27b5ecb11830..fc7bd0f23c0b 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h | |||
@@ -1384,6 +1384,18 @@ | |||
1384 | Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ | 1384 | Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ |
1385 | #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 | 1385 | #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 |
1386 | #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 | 1386 | #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 |
1387 | /* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped | ||
1388 | * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC | ||
1389 | * attention; [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] ||
1390 | * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 | ||
1391 | * parity; [31-10] Reserved; */ | ||
1392 | #define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688 | ||
1393 | /* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped | ||
1394 | * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC | ||
1395 | * attention; [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] ||
1396 | * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 | ||
1397 | * parity; [31-10] Reserved; */ | ||
1398 | #define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0 | ||
1387 | /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu | 1399 | /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu |
1388 | 128 bit vector */ | 1400 | 128 bit vector */ |
1389 | #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 | 1401 | #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 |
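The new comments document the bit layout of the fifth AEU enable word: bits [0] and [1] for PGLUE config_space/misc_flr, [2]/[3] for PGLUE B RBC attention/parity, [4]/[5] for ATC attention/parity, [6]-[9] for the two MSTAT instances, and [31:10] reserved. A sketch of composing such an enable value from that layout; the bit macro names here are invented for illustration, since the diff documents positions, not macro names:

/* Sketch of composing a MISC_REG_AEU_ENABLE5_* value from the bit
 * layout documented in the comments above.  Bit macros are named for
 * illustration only, following the documented bit positions. */
#include <stdint.h>
#include <stdio.h>

#define AEU5_PGLUE_CONFIG_SPACE	(1u << 0)
#define AEU5_PGLUE_MISC_FLR	(1u << 1)
#define AEU5_PGLUE_B_RBC_ATTN	(1u << 2)
#define AEU5_PGLUE_B_RBC_PARITY	(1u << 3)
#define AEU5_ATC_ATTN		(1u << 4)
#define AEU5_ATC_PARITY		(1u << 5)

int main(void)
{
	/* enable PGLUE B and ATC attentions, leave reserved bits [31:10] clear */
	uint32_t val = AEU5_PGLUE_B_RBC_ATTN | AEU5_PGLUE_B_RBC_PARITY |
		       AEU5_ATC_ATTN | AEU5_ATC_PARITY;

	printf("AEU enable5: 0x%08x\n", val);
	return 0;
}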
@@ -3007,11 +3019,27 @@ | |||
3007 | /* [R 6] Debug only: Number of used entries in the data FIFO */ | 3019 | /* [R 6] Debug only: Number of used entries in the data FIFO */ |
3008 | #define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c | 3020 | #define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c |
3009 | /* [R 7] Debug only: Number of used entries in the header FIFO */ | 3021 | /* [R 7] Debug only: Number of used entries in the header FIFO */ |
3010 | #define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 | 3022 | #define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 |
3011 | #define PXP2_REG_PGL_ADDR_88_F0 0x120534 | 3023 | #define PXP2_REG_PGL_ADDR_88_F0 0x120534 |
3012 | #define PXP2_REG_PGL_ADDR_8C_F0 0x120538 | 3024 | /* [R 32] GRC address for configuration access to PCIE config address 0x88. |
3013 | #define PXP2_REG_PGL_ADDR_90_F0 0x12053c | 3025 | * any write to this PCIE address will cause a GRC write access to the |
3014 | #define PXP2_REG_PGL_ADDR_94_F0 0x120540 | 3026 | * address that's in this register */ |
3027 | #define PXP2_REG_PGL_ADDR_88_F1 0x120544 | ||
3028 | #define PXP2_REG_PGL_ADDR_8C_F0 0x120538 | ||
3029 | /* [R 32] GRC address for configuration access to PCIE config address 0x8c. | ||
3030 | * any write to this PCIE address will cause a GRC write access to the | ||
3031 | * address that's in this register */ ||
3032 | #define PXP2_REG_PGL_ADDR_8C_F1 0x120548 | ||
3033 | #define PXP2_REG_PGL_ADDR_90_F0 0x12053c | ||
3034 | /* [R 32] GRC address for configuration access to PCIE config address 0x90. | ||
3035 | * any write to this PCIE address will cause a GRC write access to the | ||
3036 | * address that's in this register */ ||
3037 | #define PXP2_REG_PGL_ADDR_90_F1 0x12054c | ||
3038 | #define PXP2_REG_PGL_ADDR_94_F0 0x120540 | ||
3039 | /* [R 32] GRC address for configuration access to PCIE config address 0x94. | ||
3040 | * any write to this PCIE address will cause a GRC write access to the | ||
3041 | * address that's in this register */ ||
3042 | #define PXP2_REG_PGL_ADDR_94_F1 0x120550 | ||
3015 | #define PXP2_REG_PGL_CONTROL0 0x120490 | 3043 | #define PXP2_REG_PGL_CONTROL0 0x120490 |
3016 | #define PXP2_REG_PGL_CONTROL1 0x120514 | 3044 | #define PXP2_REG_PGL_CONTROL1 0x120514 |
3017 | #define PXP2_REG_PGL_DEBUG 0x120520 | 3045 | #define PXP2_REG_PGL_DEBUG 0x120520 |
@@ -5304,7 +5332,7 @@ | |||
5304 | #define XCM_REG_XX_OVFL_EVNT_ID 0x20058 | 5332 | #define XCM_REG_XX_OVFL_EVNT_ID 0x20058 |
5305 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) | 5333 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) |
5306 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) | 5334 | #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) |
5307 | #define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3) | 5335 | #define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2) |
5308 | #define XMAC_CTRL_REG_RX_EN (0x1<<1) | 5336 | #define XMAC_CTRL_REG_RX_EN (0x1<<1) |
5309 | #define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) | 5337 | #define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) |
5310 | #define XMAC_CTRL_REG_TX_EN (0x1<<0) | 5338 | #define XMAC_CTRL_REG_TX_EN (0x1<<0) |
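This hunk swaps the core-loopback bit for the line-loopback bit (bit 2) among the XMAC control flags. A short sketch of composing an XMAC control word from the bit definitions shown above (TX_EN bit 0, RX_EN bit 1, LINE_LOCAL_LPBK bit 2, SOFT_RESET bit 6); it only shows the bit manipulation, with no register access:

/* Sketch of building an XMAC control word from the bit definitions in
 * the hunk above.  Purely illustrative; no hardware access. */
#include <stdint.h>
#include <stdio.h>

#define XMAC_CTRL_REG_TX_EN		(0x1 << 0)
#define XMAC_CTRL_REG_RX_EN		(0x1 << 1)
#define XMAC_CTRL_REG_LINE_LOCAL_LPBK	(0x1 << 2)
#define XMAC_CTRL_REG_SOFT_RESET	(0x1 << 6)

int main(void)
{
	uint32_t ctrl = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;

	ctrl |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;	/* enable line-side loopback */
	ctrl &= ~XMAC_CTRL_REG_SOFT_RESET;	/* keep the MAC out of reset */

	printf("XMAC control: 0x%08x\n", ctrl);
	return 0;
}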
@@ -5750,7 +5778,7 @@ | |||
5750 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 | 5778 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 |
5751 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 | 5779 | #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 |
5752 | #define HW_LOCK_RESOURCE_SPIO 2 | 5780 | #define HW_LOCK_RESOURCE_SPIO 2 |
5753 | #define HW_LOCK_RESOURCE_UNDI 5 | 5781 | #define HW_LOCK_RESOURCE_RESET 5 |
5754 | #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) | 5782 | #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) |
5755 | #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) | 5783 | #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) |
5756 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) | 5784 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) |
@@ -6837,6 +6865,9 @@ The other bits are reserved and should be zero*/ | |||
6837 | #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 | 6865 | #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 |
6838 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 | 6866 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 |
6839 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 | 6867 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 |
6868 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12 | ||
6869 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000 | ||
6870 | #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000 | ||
6840 | #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 | 6871 | #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 |
6841 | #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 | 6872 | #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 |
6842 | #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e | 6873 | #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e |
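The new MDIO defines add the AN_ADVERTISEMENT2 register (address 0x12) and its FEC ability/request bits (0x4000/0x8000). A sketch of OR-ing those bits into the advertisement value; the mdio_rd()/mdio_wr() helpers are hypothetical stand-ins for the driver's MDIO accessors, and a shadow variable plays the PHY register:

/* Sketch of advertising FEC via the bits added above.  The rd/wr
 * helpers are hypothetical stand-ins for the driver's MDIO accessors;
 * this only shows the bit manipulation. */
#include <stdint.h>
#include <stdio.h>

#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2	0x12
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY	0x4000
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ		0x8000

static uint16_t shadow_adv2;	/* stands in for the PHY register */

static uint16_t mdio_rd(uint16_t reg)              { (void)reg; return shadow_adv2; }
static void     mdio_wr(uint16_t reg, uint16_t v)  { (void)reg; shadow_adv2 = v; }

int main(void)
{
	uint16_t val = mdio_rd(MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2);

	/* advertise FEC capability and request the peer to use it */
	val |= MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
	       MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ;
	mdio_wr(MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, val);

	printf("ADVERTISEMENT2: 0x%04x\n", (unsigned)shadow_adv2);
	return 0;
}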
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index 771f6803b238..9908f2bbcf73 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c | |||
@@ -710,7 +710,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) | |||
710 | break; | 710 | break; |
711 | 711 | ||
712 | case MAC_TYPE_NONE: /* unreached */ | 712 | case MAC_TYPE_NONE: /* unreached */ |
713 | BNX2X_ERR("stats updated by DMAE but no MAC active\n"); | 713 | DP(BNX2X_MSG_STATS, |
714 | "stats updated by DMAE but no MAC active\n"); | ||
714 | return -1; | 715 | return -1; |
715 | 716 | ||
716 | default: /* unreached */ | 717 | default: /* unreached */ |
@@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp) | |||
1391 | 1392 | ||
1392 | static void bnx2x_func_stats_base_init(struct bnx2x *bp) | 1393 | static void bnx2x_func_stats_base_init(struct bnx2x *bp) |
1393 | { | 1394 | { |
1394 | int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX; | 1395 | int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX; |
1395 | u32 func_stx; | 1396 | u32 func_stx; |
1396 | 1397 | ||
1397 | /* sanity */ | 1398 | /* sanity */ |
@@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp) | |||
1404 | func_stx = bp->func_stx; | 1405 | func_stx = bp->func_stx; |
1405 | 1406 | ||
1406 | for (vn = VN_0; vn < vn_max; vn++) { | 1407 | for (vn = VN_0; vn < vn_max; vn++) { |
1407 | int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn; | 1408 | int mb_idx = BP_FW_MB_IDX_VN(bp, vn); |
1408 | 1409 | ||
1409 | bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); | 1410 | bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); |
1410 | bnx2x_func_stats_init(bp); | 1411 | bnx2x_func_stats_init(bp); |