path: root/drivers/net/bnx2x_main.c
author	Eilon Greenstein <eilong@broadcom.com>	2009-02-12 03:36:33 -0500
committer	David S. Miller <davem@davemloft.net>	2009-02-16 02:31:12 -0500
commit	de832a55d28bdcc38a3f3c160554d2dfa5a62069 (patch)
tree	21df2b3da20af907b36fa30e1229e4e87ee235a9 /drivers/net/bnx2x_main.c
parent	d3d4f49527249e87d11219a22469dff25b674c08 (diff)
bnx2x: Per queue statistics
Re-ordering the statistics to enhance readability and adding per queue statistics (available via ethtool -S).

Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r--	drivers/net/bnx2x_main.c	804
1 file changed, 545 insertions(+), 259 deletions(-)
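The per-queue counters added by this patch are exported through the regular ethtool statistics interface. On a multi-queue bnx2x device, `ethtool -S` would be expected to list one "[N]: ..." block per queue (names taken from the new bnx2x_q_stats_arr) followed by the global counters from bnx2x_stats_arr. A rough illustration of the expected output format (interface name and values are invented, not taken from the patch):

    $ ethtool -S eth0
    NIC statistics:
         [0]: rx_bytes: 123456789
         [0]: rx_ucast_packets: 987654
         [0]: rx_discards: 0
         [0]: tx_bytes: 23456789
         [0]: tx_packets: 123456
         [1]: rx_bytes: 98765432
         ...
         rx_bytes: 222222221
         rx_pause_frames: 2
         tx_pause_frames: 2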
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index c27be17131f5..19865d5d8aed 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1271,7 +1271,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			   where we are and drop the whole packet */
 			err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
 			if (unlikely(err)) {
-				bp->eth_stats.rx_skb_alloc_failed++;
+				fp->eth_q_stats.rx_skb_alloc_failed++;
 				return err;
 			}
 
@@ -1377,7 +1377,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		/* else drop the packet and keep the buffer in the bin */
 		DP(NETIF_MSG_RX_STATUS,
 		   "Failed to allocate new skb - dropping packet!\n");
-		bp->eth_stats.rx_skb_alloc_failed++;
+		fp->eth_q_stats.rx_skb_alloc_failed++;
 	}
 
 	fp->tpa_state[queue] = BNX2X_TPA_STOP;
@@ -1539,7 +1539,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR flags %x rx packet %u\n",
 				   cqe_fp_flags, sw_comp_cons);
-				bp->eth_stats.rx_err_discard_pkt++;
+				fp->eth_q_stats.rx_err_discard_pkt++;
 				goto reuse_rx;
 			}
 
@@ -1556,7 +1556,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 					DP(NETIF_MSG_RX_ERR,
 					   "ERROR packet dropped "
 					   "because of alloc failure\n");
-					bp->eth_stats.rx_skb_alloc_failed++;
+					fp->eth_q_stats.rx_skb_alloc_failed++;
 					goto reuse_rx;
 				}
 
@@ -1582,7 +1582,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR packet dropped because "
 				   "of alloc failure\n");
-				bp->eth_stats.rx_skb_alloc_failed++;
+				fp->eth_q_stats.rx_skb_alloc_failed++;
 reuse_rx:
 				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
 				goto next_rx;
@@ -1595,7 +1595,7 @@ reuse_rx:
 			if (likely(BNX2X_RX_CSUM_OK(cqe)))
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
 			else
-				bp->eth_stats.hw_csum_err++;
+				fp->eth_q_stats.hw_csum_err++;
 		}
 	}
 
@@ -2897,10 +2897,6 @@ static void bnx2x_sp_task(struct work_struct *work)
 	if (status & 0x1)
 		bnx2x_attn_int(bp);
 
-	/* CStorm events: query_stats, port delete ramrod */
-	if (status & 0x2)
-		bp->stats_pending = 0;
-
 	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
 		     IGU_INT_NOP, 1);
 	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
@@ -3016,14 +3012,39 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
 	do { \
 		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
 		old_tclient->s = le32_to_cpu(tclient->s); \
-		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		old_uclient->s = uclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
 	} while (0)
 
 #define UPDATE_EXTEND_XSTAT(s, t) \
 	do { \
 		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
 		old_xclient->s = le32_to_cpu(xclient->s); \
-		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+	do { \
+		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+	} while (0)
+
+/* minuend[hi:lo] -= subtrahend */
+#define SUB_EXTEND_64(m_hi, m_lo, s) \
+	do { \
+		SUB_64(m_hi, 0, m_lo, s); \
+	} while (0)
+
+#define SUB_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
 	} while (0)
 
 /*
@@ -3050,11 +3071,12 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
 {
 	if (!bp->stats_pending) {
 		struct eth_query_ramrod_data ramrod_data = {0};
-		int rc;
+		int i, rc;
 
 		ramrod_data.drv_counter = bp->stats_counter++;
 		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
-		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
+		for_each_queue(bp, i)
+			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
 
 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 				   ((u32 *)&ramrod_data)[1],
@@ -3070,7 +3092,9 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
 static void bnx2x_stats_init(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
+	int i;
 
+	bp->stats_pending = 0;
 	bp->executer_idx = 0;
 	bp->stats_counter = 0;
 
@@ -3092,9 +3116,19 @@ static void bnx2x_stats_init(struct bnx2x *bp)
 		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
 
 	/* function stats */
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		memset(&fp->old_tclient, 0,
+		       sizeof(struct tstorm_per_client_stats));
+		memset(&fp->old_uclient, 0,
+		       sizeof(struct ustorm_per_client_stats));
+		memset(&fp->old_xclient, 0,
+		       sizeof(struct xstorm_per_client_stats));
+		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
+	}
+
 	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
-	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
-	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
 	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
 
 	bp->stats_state = STATS_STATE_DISABLED;
@@ -3108,6 +3142,8 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 
 	*stats_comp = DMAE_COMP_VAL;
+	if (CHIP_REV_IS_SLOW(bp))
+		return;
 
 	/* loader */
 	if (bp->executer_idx) {
@@ -3497,6 +3533,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
 {
 	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
 	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
 	struct regpair diff;
 
 	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
@@ -3507,7 +3544,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
 	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
 	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
 	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
-	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
+	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
 	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
 	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
 	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
@@ -3528,12 +3565,23 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
 	UPDATE_STAT64(tx_stat_gterr,
 				tx_stat_dot3statsinternalmactransmiterrors);
 	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
+
+	estats->pause_frames_received_hi =
+				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
+	estats->pause_frames_received_lo =
+				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
+
+	estats->pause_frames_sent_hi =
+				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+	estats->pause_frames_sent_lo =
+				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
 }
 
 static void bnx2x_emac_stats_update(struct bnx2x *bp)
 {
 	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
 	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
 
 	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
 	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
@@ -3566,6 +3614,24 @@ static void bnx2x_emac_stats_update(struct bnx2x *bp)
 	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
 	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
 	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
+
+	estats->pause_frames_received_hi =
+			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
+	estats->pause_frames_received_lo =
+			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
+	ADD_64(estats->pause_frames_received_hi,
+	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
+	       estats->pause_frames_received_lo,
+	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
+
+	estats->pause_frames_sent_hi =
+			pstats->mac_stx[1].tx_stat_outxonsent_hi;
+	estats->pause_frames_sent_lo =
+			pstats->mac_stx[1].tx_stat_outxonsent_lo;
+	ADD_64(estats->pause_frames_sent_hi,
+	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
+	       estats->pause_frames_sent_lo,
+	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
 }
 
 static int bnx2x_hw_stats_update(struct bnx2x *bp)
@@ -3575,6 +3641,7 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
 	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
 	struct regpair diff;
+	u32 nig_timer_max;
 
 	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
 		bnx2x_bmac_stats_update(bp);
@@ -3605,134 +3672,211 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
 
 	pstats->host_port_stats_start = ++pstats->host_port_stats_end;
 
+	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+	if (nig_timer_max != estats->nig_timer_max) {
+		estats->nig_timer_max = nig_timer_max;
+		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
+	}
+
 	return 0;
 }
 
 static int bnx2x_storm_stats_update(struct bnx2x *bp)
 {
 	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
-	int cl_id = BP_CL_ID(bp);
 	struct tstorm_per_port_stats *tport =
 					&stats->tstorm_common.port_statistics;
-	struct tstorm_per_client_stats *tclient =
-			&stats->tstorm_common.client_statistics[cl_id];
-	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
-	struct xstorm_per_client_stats *xclient =
-			&stats->xstorm_common.client_statistics[cl_id];
-	struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
 	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
-	u32 diff;
+	int i;
+
+	memset(&(fstats->total_bytes_received_hi), 0,
+	       sizeof(struct host_func_stats) - 2*sizeof(u32));
+	estats->error_bytes_received_hi = 0;
+	estats->error_bytes_received_lo = 0;
+	estats->etherstatsoverrsizepkts_hi = 0;
+	estats->etherstatsoverrsizepkts_lo = 0;
+	estats->no_buff_discard_hi = 0;
+	estats->no_buff_discard_lo = 0;
 
-	/* are storm stats valid? */
-	if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		int cl_id = fp->cl_id;
+		struct tstorm_per_client_stats *tclient =
+				&stats->tstorm_common.client_statistics[cl_id];
+		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
+		struct ustorm_per_client_stats *uclient =
+				&stats->ustorm_common.client_statistics[cl_id];
+		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
+		struct xstorm_per_client_stats *xclient =
+				&stats->xstorm_common.client_statistics[cl_id];
+		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
+		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+		u32 diff;
+
+		/* are storm stats valid? */
+		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
 							bp->stats_counter) {
-		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
-		   " tstorm counter (%d) != stats_counter (%d)\n",
-		   tclient->stats_counter, bp->stats_counter);
+			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
+			   " xstorm counter (%d) != stats_counter (%d)\n",
+			   i, xclient->stats_counter, bp->stats_counter);
 			return -1;
 		}
-	if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
+		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
 							bp->stats_counter) {
-		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
-		   " xstorm counter (%d) != stats_counter (%d)\n",
-		   xclient->stats_counter, bp->stats_counter);
+			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
+			   " tstorm counter (%d) != stats_counter (%d)\n",
+			   i, tclient->stats_counter, bp->stats_counter);
 			return -2;
 		}
+		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
+							bp->stats_counter) {
+			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
+			   " ustorm counter (%d) != stats_counter (%d)\n",
+			   i, uclient->stats_counter, bp->stats_counter);
+			return -4;
+		}
 
-	fstats->total_bytes_received_hi =
-	fstats->valid_bytes_received_hi =
+		qstats->total_bytes_received_hi =
+		qstats->valid_bytes_received_hi =
 				le32_to_cpu(tclient->total_rcv_bytes.hi);
-	fstats->total_bytes_received_lo =
-	fstats->valid_bytes_received_lo =
+		qstats->total_bytes_received_lo =
+		qstats->valid_bytes_received_lo =
 				le32_to_cpu(tclient->total_rcv_bytes.lo);
 
-	estats->error_bytes_received_hi =
+		qstats->error_bytes_received_hi =
 				le32_to_cpu(tclient->rcv_error_bytes.hi);
-	estats->error_bytes_received_lo =
+		qstats->error_bytes_received_lo =
 				le32_to_cpu(tclient->rcv_error_bytes.lo);
-	ADD_64(estats->error_bytes_received_hi,
-	       estats->rx_stat_ifhcinbadoctets_hi,
-	       estats->error_bytes_received_lo,
-	       estats->rx_stat_ifhcinbadoctets_lo);
 
-	ADD_64(fstats->total_bytes_received_hi,
-	       estats->error_bytes_received_hi,
-	       fstats->total_bytes_received_lo,
-	       estats->error_bytes_received_lo);
+		ADD_64(qstats->total_bytes_received_hi,
+		       qstats->error_bytes_received_hi,
+		       qstats->total_bytes_received_lo,
+		       qstats->error_bytes_received_lo);
 
-	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
-	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
-					total_multicast_packets_received);
-	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
-					total_broadcast_packets_received);
-
-	fstats->total_bytes_transmitted_hi =
+		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
+					total_unicast_packets_received);
+		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
+					total_multicast_packets_received);
+		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
+					total_broadcast_packets_received);
+		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
+					etherstatsoverrsizepkts);
+		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
+
+		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
+					total_unicast_packets_received);
+		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
+					total_multicast_packets_received);
+		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
+					total_broadcast_packets_received);
+		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
+		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
+		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
+
+		qstats->total_bytes_transmitted_hi =
 				le32_to_cpu(xclient->total_sent_bytes.hi);
-	fstats->total_bytes_transmitted_lo =
+		qstats->total_bytes_transmitted_lo =
 				le32_to_cpu(xclient->total_sent_bytes.lo);
 
 		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
 					total_unicast_packets_transmitted);
 		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
 					total_multicast_packets_transmitted);
 		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
 					total_broadcast_packets_transmitted);
+
+		old_tclient->checksum_discard = tclient->checksum_discard;
+		old_tclient->ttl0_discard = tclient->ttl0_discard;
+
+		ADD_64(fstats->total_bytes_received_hi,
+		       qstats->total_bytes_received_hi,
+		       fstats->total_bytes_received_lo,
+		       qstats->total_bytes_received_lo);
+		ADD_64(fstats->total_bytes_transmitted_hi,
+		       qstats->total_bytes_transmitted_hi,
+		       fstats->total_bytes_transmitted_lo,
+		       qstats->total_bytes_transmitted_lo);
+		ADD_64(fstats->total_unicast_packets_received_hi,
+		       qstats->total_unicast_packets_received_hi,
+		       fstats->total_unicast_packets_received_lo,
+		       qstats->total_unicast_packets_received_lo);
+		ADD_64(fstats->total_multicast_packets_received_hi,
+		       qstats->total_multicast_packets_received_hi,
+		       fstats->total_multicast_packets_received_lo,
+		       qstats->total_multicast_packets_received_lo);
+		ADD_64(fstats->total_broadcast_packets_received_hi,
+		       qstats->total_broadcast_packets_received_hi,
+		       fstats->total_broadcast_packets_received_lo,
+		       qstats->total_broadcast_packets_received_lo);
+		ADD_64(fstats->total_unicast_packets_transmitted_hi,
+		       qstats->total_unicast_packets_transmitted_hi,
+		       fstats->total_unicast_packets_transmitted_lo,
+		       qstats->total_unicast_packets_transmitted_lo);
+		ADD_64(fstats->total_multicast_packets_transmitted_hi,
+		       qstats->total_multicast_packets_transmitted_hi,
+		       fstats->total_multicast_packets_transmitted_lo,
+		       qstats->total_multicast_packets_transmitted_lo);
+		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
+		       qstats->total_broadcast_packets_transmitted_hi,
+		       fstats->total_broadcast_packets_transmitted_lo,
+		       qstats->total_broadcast_packets_transmitted_lo);
+		ADD_64(fstats->valid_bytes_received_hi,
+		       qstats->valid_bytes_received_hi,
+		       fstats->valid_bytes_received_lo,
+		       qstats->valid_bytes_received_lo);
+
+		ADD_64(estats->error_bytes_received_hi,
+		       qstats->error_bytes_received_hi,
+		       estats->error_bytes_received_lo,
+		       qstats->error_bytes_received_lo);
+		ADD_64(estats->etherstatsoverrsizepkts_hi,
+		       qstats->etherstatsoverrsizepkts_hi,
+		       estats->etherstatsoverrsizepkts_lo,
+		       qstats->etherstatsoverrsizepkts_lo);
+		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
+		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
+	}
+
+	ADD_64(fstats->total_bytes_received_hi,
+	       estats->rx_stat_ifhcinbadoctets_hi,
+	       fstats->total_bytes_received_lo,
+	       estats->rx_stat_ifhcinbadoctets_lo);
 
 	memcpy(estats, &(fstats->total_bytes_received_hi),
 	       sizeof(struct host_func_stats) - 2*sizeof(u32));
 
-	estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
-	estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
-	estats->brb_truncate_discard =
+	ADD_64(estats->etherstatsoverrsizepkts_hi,
+	       estats->rx_stat_dot3statsframestoolong_hi,
+	       estats->etherstatsoverrsizepkts_lo,
+	       estats->rx_stat_dot3statsframestoolong_lo);
+	ADD_64(estats->error_bytes_received_hi,
+	       estats->rx_stat_ifhcinbadoctets_hi,
+	       estats->error_bytes_received_lo,
+	       estats->rx_stat_ifhcinbadoctets_lo);
+
+	if (bp->port.pmf) {
+		estats->mac_filter_discard =
+				le32_to_cpu(tport->mac_filter_discard);
+		estats->xxoverflow_discard =
+				le32_to_cpu(tport->xxoverflow_discard);
+		estats->brb_truncate_discard =
 				le32_to_cpu(tport->brb_truncate_discard);
 		estats->mac_discard = le32_to_cpu(tport->mac_discard);
-
-	old_tclient->rcv_unicast_bytes.hi =
-				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
-	old_tclient->rcv_unicast_bytes.lo =
-				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
-	old_tclient->rcv_broadcast_bytes.hi =
-				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
-	old_tclient->rcv_broadcast_bytes.lo =
-				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
-	old_tclient->rcv_multicast_bytes.hi =
-				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
-	old_tclient->rcv_multicast_bytes.lo =
-				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
-	old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
-
-	old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
-	old_tclient->packets_too_big_discard =
-				le32_to_cpu(tclient->packets_too_big_discard);
-	estats->no_buff_discard =
-	old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
-	old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
-
-	old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
-	old_xclient->unicast_bytes_sent.hi =
-				le32_to_cpu(xclient->unicast_bytes_sent.hi);
-	old_xclient->unicast_bytes_sent.lo =
-				le32_to_cpu(xclient->unicast_bytes_sent.lo);
-	old_xclient->multicast_bytes_sent.hi =
-				le32_to_cpu(xclient->multicast_bytes_sent.hi);
-	old_xclient->multicast_bytes_sent.lo =
-				le32_to_cpu(xclient->multicast_bytes_sent.lo);
-	old_xclient->broadcast_bytes_sent.hi =
-				le32_to_cpu(xclient->broadcast_bytes_sent.hi);
-	old_xclient->broadcast_bytes_sent.lo =
-				le32_to_cpu(xclient->broadcast_bytes_sent.lo);
+	}
 
 	fstats->host_func_stats_start = ++fstats->host_func_stats_end;
 
+	bp->stats_pending = 0;
+
 	return 0;
 }
 
 static void bnx2x_net_stats_update(struct bnx2x *bp)
 {
-	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
 	struct net_device_stats *nstats = &bp->dev->stats;
+	int i;
 
 	nstats->rx_packets =
 		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
@@ -3744,34 +3888,33 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
 		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
 		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
 
-	nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
+	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
 
 	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
 
-	nstats->rx_dropped = old_tclient->checksum_discard +
-			     estats->mac_discard;
+	nstats->rx_dropped = estats->mac_discard;
+	for_each_queue(bp, i)
+		nstats->rx_dropped +=
+			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+
 	nstats->tx_dropped = 0;
 
 	nstats->multicast =
-		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
+		bnx2x_hilo(&estats->total_multicast_packets_received_hi);
 
 	nstats->collisions =
-		estats->tx_stat_dot3statssinglecollisionframes_lo +
-		estats->tx_stat_dot3statsmultiplecollisionframes_lo +
-		estats->tx_stat_dot3statslatecollisions_lo +
-		estats->tx_stat_dot3statsexcessivecollisions_lo;
-
-	estats->jabber_packets_received =
-		old_tclient->packets_too_big_discard +
-		estats->rx_stat_dot3statsframestoolong_lo;
+		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
 
 	nstats->rx_length_errors =
-		estats->rx_stat_etherstatsundersizepkts_lo +
-		estats->jabber_packets_received;
-	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
-	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
-	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
-	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
+		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
+		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
+	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
+				 bnx2x_hilo(&estats->brb_truncate_hi);
+	nstats->rx_crc_errors =
+		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
+	nstats->rx_frame_errors =
+		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
+	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
 	nstats->rx_missed_errors = estats->xxoverflow_discard;
 
 	nstats->rx_errors = nstats->rx_length_errors +
@@ -3782,46 +3925,61 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
 		nstats->rx_missed_errors;
 
 	nstats->tx_aborted_errors =
-		estats->tx_stat_dot3statslatecollisions_lo +
-		estats->tx_stat_dot3statsexcessivecollisions_lo;
-	nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
+		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
+		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
+	nstats->tx_carrier_errors =
+		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
 	nstats->tx_fifo_errors = 0;
 	nstats->tx_heartbeat_errors = 0;
 	nstats->tx_window_errors = 0;
 
 	nstats->tx_errors = nstats->tx_aborted_errors +
-			    nstats->tx_carrier_errors;
+			    nstats->tx_carrier_errors +
+	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
+}
+
+static void bnx2x_drv_stats_update(struct bnx2x *bp)
+{
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	int i;
+
+	estats->driver_xoff = 0;
+	estats->rx_err_discard_pkt = 0;
+	estats->rx_skb_alloc_failed = 0;
+	estats->hw_csum_err = 0;
+	for_each_queue(bp, i) {
+		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+
+		estats->driver_xoff += qstats->driver_xoff;
+		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
+		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
+		estats->hw_csum_err += qstats->hw_csum_err;
+	}
 }
 
 static void bnx2x_stats_update(struct bnx2x *bp)
 {
 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-	int update = 0;
 
 	if (*stats_comp != DMAE_COMP_VAL)
 		return;
 
 	if (bp->port.pmf)
-		update = (bnx2x_hw_stats_update(bp) == 0);
-
-	update |= (bnx2x_storm_stats_update(bp) == 0);
+		bnx2x_hw_stats_update(bp);
 
-	if (update)
-		bnx2x_net_stats_update(bp);
-
-	else {
-		if (bp->stats_pending) {
-			bp->stats_pending++;
-			if (bp->stats_pending == 3) {
-				BNX2X_ERR("stats not updated for 3 times\n");
-				bnx2x_panic();
-				return;
-			}
-		}
+	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
+		BNX2X_ERR("storm stats were not updated for 3 times\n");
+		bnx2x_panic();
+		return;
 	}
 
+	bnx2x_net_stats_update(bp);
+	bnx2x_drv_stats_update(bp);
+
 	if (bp->msglevel & NETIF_MSG_TIMER) {
-		struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
+		struct tstorm_per_client_stats *old_tclient =
+							&bp->fp->old_tclient;
+		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
 		struct bnx2x_eth_stats *estats = &bp->eth_stats;
 		struct net_device_stats *nstats = &bp->dev->stats;
 		int i;
@@ -3836,19 +3994,21 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
 			     bp->fp->rx_comp_cons),
 		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
-		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
-		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
-		       estats->driver_xoff, estats->brb_drop_lo);
+		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
+				  "brb truncate %u\n",
+		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
+		       qstats->driver_xoff,
+		       estats->brb_drop_lo, estats->brb_truncate_lo);
 		printk(KERN_DEBUG "tstats: checksum_discard %u "
-			"packets_too_big_discard %u no_buff_discard %u "
+			"packets_too_big_discard %lu no_buff_discard %lu "
 			"mac_discard %u mac_filter_discard %u "
 			"xxovrflow_discard %u brb_truncate_discard %u "
 			"ttl0_discard %u\n",
 			old_tclient->checksum_discard,
-			old_tclient->packets_too_big_discard,
-			old_tclient->no_buff_discard, estats->mac_discard,
-			estats->mac_filter_discard, estats->xxoverflow_discard,
-			estats->brb_truncate_discard,
+			bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
+			bnx2x_hilo(&qstats->no_buff_discard_hi),
+			estats->mac_discard, estats->mac_filter_discard,
+			estats->xxoverflow_discard, estats->brb_truncate_discard,
 			old_tclient->ttl0_discard);
 
 		for_each_queue(bp, i) {
@@ -4400,8 +4560,8 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 		for (i = 0; i < bp->rx_ring_size; i++) {
 			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
 				BNX2X_ERR("was only able to allocate "
-					  "%d rx skbs\n", i);
-				bp->eth_stats.rx_skb_alloc_failed++;
+					  "%d rx skbs on queue[%d]\n", i, j);
+				fp->eth_q_stats.rx_skb_alloc_failed++;
 				break;
 			}
 			ring_prod = NEXT_RX_IDX(ring_prod);
@@ -4490,6 +4650,7 @@ static void bnx2x_init_context(struct bnx2x *bp)
 	for_each_queue(bp, i) {
 		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
 		struct bnx2x_fastpath *fp = &bp->fp[i];
+		u8 cl_id = fp->cl_id;
 		u8 sb_id = FP_SB_ID(fp);
 
 		context->ustorm_st_context.common.sb_index_numbers =
@@ -4497,7 +4658,10 @@ static void bnx2x_init_context(struct bnx2x *bp)
 		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
 		context->ustorm_st_context.common.status_block_id = sb_id;
 		context->ustorm_st_context.common.flags =
-			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
+			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
+			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
+		context->ustorm_st_context.common.statistics_counter_id =
+								cl_id;
 		context->ustorm_st_context.common.mc_alignment_log_size =
 			BNX2X_RX_ALIGN_SHIFT;
 		context->ustorm_st_context.common.bd_buff_size =
@@ -4568,9 +4732,9 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
 	int i;
 
 	tstorm_client.mtu = bp->dev->mtu;
-	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
 	tstorm_client.config_flags =
-				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
+				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
+				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
 #ifdef BCM_VLAN
 	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
 		tstorm_client.config_flags |=
@@ -4592,6 +4756,8 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
 	}
 
 	for_each_queue(bp, i) {
+		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
+
 		REG_WR(bp, BAR_TSTRORM_INTMEM +
 		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
 		       ((u32 *)&tstorm_client)[0]);
@@ -4688,7 +4854,8 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	struct stats_indication_flags stats_flags = {0};
 	int port = BP_PORT(bp);
 	int func = BP_FUNC(bp);
-	int i;
+	int i, j;
+	u32 offset;
 	u16 max_agg_size;
 
 	if (is_multi(bp)) {
@@ -4708,17 +4875,29 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
 	bnx2x_set_storm_rx_mode(bp);
 
-	/* reset xstorm per client statistics */
-	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
-		REG_WR(bp, BAR_XSTRORM_INTMEM +
-		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
-		       i*4, 0);
-	}
-	/* reset tstorm per client statistics */
-	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
-		REG_WR(bp, BAR_TSTRORM_INTMEM +
-		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
-		       i*4, 0);
+	for_each_queue(bp, i) {
+		u8 cl_id = bp->fp[i].cl_id;
+
+		/* reset xstorm per client statistics */
+		offset = BAR_XSTRORM_INTMEM +
+			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
+		for (j = 0;
+		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
+			REG_WR(bp, offset + j*4, 0);
+
+		/* reset tstorm per client statistics */
+		offset = BAR_TSTRORM_INTMEM +
+			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
+		for (j = 0;
+		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
+			REG_WR(bp, offset + j*4, 0);
+
+		/* reset ustorm per client statistics */
+		offset = BAR_USTRORM_INTMEM +
+			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
+		for (j = 0;
+		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
+			REG_WR(bp, offset + j*4, 0);
 	}
 
 	/* Init statistics related context */
@@ -4734,6 +4913,11 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
 	       ((u32 *)&stats_flags)[1]);
 
+	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
+	       ((u32 *)&stats_flags)[0]);
+	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
+	       ((u32 *)&stats_flags)[1]);
+
 	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
 	       ((u32 *)&stats_flags)[0]);
 	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
@@ -4753,6 +4937,13 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
 	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
 
+	REG_WR(bp, BAR_USTRORM_INTMEM +
+	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
+	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
+	REG_WR(bp, BAR_USTRORM_INTMEM +
+	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
+	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
+
 	if (CHIP_IS_E1H(bp)) {
 		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
 			IS_E1HMF(bp));
@@ -9210,40 +9401,98 @@ static void bnx2x_self_test(struct net_device *dev,
 static const struct {
 	long offset;
 	int size;
+	u8 string[ETH_GSTRING_LEN];
+} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
+/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
+	{ Q_STATS_OFFSET32(error_bytes_received_hi),
+						8, "[%d]: rx_error_bytes" },
+	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
+						8, "[%d]: rx_ucast_packets" },
+	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
+						8, "[%d]: rx_mcast_packets" },
+	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
+						8, "[%d]: rx_bcast_packets" },
+	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
+	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
+					4, "[%d]: rx_phy_ip_err_discards"},
+	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
+					4, "[%d]: rx_skb_alloc_discard" },
+	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
+
+/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
+	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+						8, "[%d]: tx_packets" }
+};
+
+static const struct {
+	long offset;
+	int size;
 	u32 flags;
 #define STATS_FLAGS_PORT		1
 #define STATS_FLAGS_FUNC		2
+#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
 	u8 string[ETH_GSTRING_LEN];
 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
-/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),
-				8, STATS_FLAGS_FUNC, "rx_bytes" },
+/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_bytes" },
 	{ STATS_OFFSET32(error_bytes_received_hi),
-				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
-	{ STATS_OFFSET32(total_bytes_transmitted_hi),
-				8, STATS_FLAGS_FUNC, "tx_bytes" },
-	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
-				8, STATS_FLAGS_PORT, "tx_error_bytes" },
+				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
 	{ STATS_OFFSET32(total_unicast_packets_received_hi),
-				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
+				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
 	{ STATS_OFFSET32(total_multicast_packets_received_hi),
-				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
+				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
 	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
-				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
-	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
-				8, STATS_FLAGS_FUNC, "tx_packets" },
-	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
-				8, STATS_FLAGS_PORT, "tx_mac_errors" },
-/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
-				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
+				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
 	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
 				8, STATS_FLAGS_PORT, "rx_crc_errors" },
 	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
 				8, STATS_FLAGS_PORT, "rx_align_errors" },
+	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
+				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
+	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
+				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
+/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
+				8, STATS_FLAGS_PORT, "rx_fragments" },
+	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+				8, STATS_FLAGS_PORT, "rx_jabbers" },
+	{ STATS_OFFSET32(no_buff_discard_hi),
+				8, STATS_FLAGS_BOTH, "rx_discards" },
+	{ STATS_OFFSET32(mac_filter_discard),
+				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+	{ STATS_OFFSET32(xxoverflow_discard),
+				4, STATS_FLAGS_PORT, "rx_fw_discards" },
+	{ STATS_OFFSET32(brb_drop_hi),
+				8, STATS_FLAGS_PORT, "rx_brb_discard" },
+	{ STATS_OFFSET32(brb_truncate_hi),
+				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
+	{ STATS_OFFSET32(pause_frames_received_hi),
+				8, STATS_FLAGS_PORT, "rx_pause_frames" },
+	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
+				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+	{ STATS_OFFSET32(nig_timer_max),
+			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
+/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
+				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
+	{ STATS_OFFSET32(rx_skb_alloc_failed),
+				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
+	{ STATS_OFFSET32(hw_csum_err),
+				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
+
+	{ STATS_OFFSET32(total_bytes_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_bytes" },
+	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+				8, STATS_FLAGS_PORT, "tx_error_bytes" },
+	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_packets" },
+	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
+				8, STATS_FLAGS_PORT, "tx_mac_errors" },
+	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
+				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
 	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
 				8, STATS_FLAGS_PORT, "tx_single_collisions" },
 	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
 				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
-	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
 				8, STATS_FLAGS_PORT, "tx_deferred" },
 	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
 				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
@@ -9251,14 +9500,6 @@ static const struct {
 				8, STATS_FLAGS_PORT, "tx_late_collisions" },
 	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
 				8, STATS_FLAGS_PORT, "tx_total_collisions" },
-	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
-				8, STATS_FLAGS_PORT, "rx_fragments" },
-/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
-				8, STATS_FLAGS_PORT, "rx_jabbers" },
-	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
-				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
-	{ STATS_OFFSET32(jabber_packets_received),
-				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
 				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
 	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
@@ -9271,52 +9512,46 @@ static const struct {
 				8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
 	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
 				8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
-	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
+/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
 				8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
-/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
-				8, STATS_FLAGS_PORT, "rx_xon_frames" },
-	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
-				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
-	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
-				8, STATS_FLAGS_PORT, "tx_xon_frames" },
-	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
-				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
-	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
-				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
-	{ STATS_OFFSET32(mac_filter_discard),
-				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
-	{ STATS_OFFSET32(no_buff_discard),
-				4, STATS_FLAGS_FUNC, "rx_discards" },
-	{ STATS_OFFSET32(xxoverflow_discard),
-				4, STATS_FLAGS_PORT, "rx_fw_discards" },
-	{ STATS_OFFSET32(brb_drop_hi),
-				8, STATS_FLAGS_PORT, "brb_discard" },
-	{ STATS_OFFSET32(brb_truncate_hi),
-				8, STATS_FLAGS_PORT, "brb_truncate" },
-/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
-				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
-	{ STATS_OFFSET32(rx_skb_alloc_failed),
-				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
-/* 42 */{ STATS_OFFSET32(hw_csum_err),
-				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
+	{ STATS_OFFSET32(pause_frames_sent_hi),
+				8, STATS_FLAGS_PORT, "tx_pause_frames" }
 };
 
-#define IS_NOT_E1HMF_STAT(bp, i) \
-		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
+#define IS_PORT_STAT(i) \
+	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
+#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
+#define IS_E1HMF_MODE_STAT(bp) \
+			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
 
 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	int i, j;
+	int i, j, k;
 
 	switch (stringset) {
 	case ETH_SS_STATS:
-		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-			if (IS_NOT_E1HMF_STAT(bp, i))
-				continue;
-			strcpy(buf + j*ETH_GSTRING_LEN,
-			       bnx2x_stats_arr[i].string);
-			j++;
+		if (is_multi(bp)) {
+			k = 0;
+			for_each_queue(bp, i) {
+				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
+					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
+						bnx2x_q_stats_arr[j].string, i);
+				k += BNX2X_NUM_Q_STATS;
+			}
+			if (IS_E1HMF_MODE_STAT(bp))
+				break;
+			for (j = 0; j < BNX2X_NUM_STATS; j++)
+				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
+				       bnx2x_stats_arr[j].string);
+		} else {
+			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
+					continue;
+				strcpy(buf + j*ETH_GSTRING_LEN,
+				       bnx2x_stats_arr[i].string);
+				j++;
+			}
 		}
 		break;
 
@@ -9329,13 +9564,22 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 static int bnx2x_get_stats_count(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	int i, num_stats = 0;
+	int i, num_stats;
 
-	for (i = 0; i < BNX2X_NUM_STATS; i++) {
-		if (IS_NOT_E1HMF_STAT(bp, i))
-			continue;
-		num_stats++;
+	if (is_multi(bp)) {
+		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
+		if (!IS_E1HMF_MODE_STAT(bp))
+			num_stats += BNX2X_NUM_STATS;
+	} else {
+		if (IS_E1HMF_MODE_STAT(bp)) {
+			num_stats = 0;
+			for (i = 0; i < BNX2X_NUM_STATS; i++)
+				if (IS_FUNC_STAT(i))
+					num_stats++;
+		} else
+			num_stats = BNX2X_NUM_STATS;
 	}
+
 	return num_stats;
 }
 
@@ -9343,29 +9587,71 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 				    struct ethtool_stats *stats, u64 *buf)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	u32 *hw_stats = (u32 *)&bp->eth_stats;
-	int i, j;
-
-	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-		if (IS_NOT_E1HMF_STAT(bp, i))
-			continue;
-
-		if (bnx2x_stats_arr[i].size == 0) {
-			/* skip this counter */
-			buf[j] = 0;
-			j++;
-			continue;
-		}
-		if (bnx2x_stats_arr[i].size == 4) {
-			/* 4-byte counter */
-			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
-			j++;
-			continue;
-		}
-		/* 8-byte counter */
-		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
-				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
-		j++;
+	u32 *hw_stats, *offset;
+	int i, j, k;
+
+	if (is_multi(bp)) {
+		k = 0;
+		for_each_queue(bp, i) {
+			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
+			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
+				if (bnx2x_q_stats_arr[j].size == 0) {
+					/* skip this counter */
+					buf[k + j] = 0;
+					continue;
+				}
+				offset = (hw_stats +
+					  bnx2x_q_stats_arr[j].offset);
+				if (bnx2x_q_stats_arr[j].size == 4) {
+					/* 4-byte counter */
+					buf[k + j] = (u64) *offset;
+					continue;
+				}
+				/* 8-byte counter */
+				buf[k + j] = HILO_U64(*offset, *(offset + 1));
+			}
+			k += BNX2X_NUM_Q_STATS;
+		}
+		if (IS_E1HMF_MODE_STAT(bp))
+			return;
+		hw_stats = (u32 *)&bp->eth_stats;
+		for (j = 0; j < BNX2X_NUM_STATS; j++) {
+			if (bnx2x_stats_arr[j].size == 0) {
+				/* skip this counter */
+				buf[k + j] = 0;
+				continue;
+			}
+			offset = (hw_stats + bnx2x_stats_arr[j].offset);
+			if (bnx2x_stats_arr[j].size == 4) {
+				/* 4-byte counter */
+				buf[k + j] = (u64) *offset;
+				continue;
+			}
+			/* 8-byte counter */
+			buf[k + j] = HILO_U64(*offset, *(offset + 1));
+		}
+	} else {
+		hw_stats = (u32 *)&bp->eth_stats;
+		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
+				continue;
+			if (bnx2x_stats_arr[i].size == 0) {
+				/* skip this counter */
+				buf[j] = 0;
+				j++;
+				continue;
+			}
+			offset = (hw_stats + bnx2x_stats_arr[i].offset);
+			if (bnx2x_stats_arr[i].size == 4) {
+				/* 4-byte counter */
+				buf[j] = (u64) *offset;
+				j++;
+				continue;
+			}
+			/* 8-byte counter */
+			buf[j] = HILO_U64(*offset, *(offset + 1));
+			j++;
+		}
 	}
 }
 
@@ -9750,7 +10036,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	fp = &bp->fp[fp_index];
 
 	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
-		bp->eth_stats.driver_xoff++,
+		fp->eth_q_stats.driver_xoff++,
 		netif_tx_stop_queue(txq);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
@@ -9991,7 +10277,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		   if we put Tx into XOFF state. */
 		smp_mb();
 		netif_tx_stop_queue(txq);
-		bp->eth_stats.driver_xoff++;
+		fp->eth_q_stats.driver_xoff++;
 		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
 			netif_tx_wake_queue(txq);
 		}
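A note on the hi/lo counter convention this patch relies on (UPDATE_EXTEND_*, SUB_EXTEND_*, ADD_64, bnx2x_hilo): the firmware exposes 32-bit wrapping counters per client, and the driver folds each delta into a 64-bit software counter kept as a pair of u32 halves. A minimal standalone sketch of that accumulation pattern, using hypothetical names rather than the driver's actual types:

    #include <stdint.h>

    /* 64-bit software counter kept as two 32-bit halves, similar in spirit
     * to the _hi/_lo fields in the bnx2x per-queue statistics */
    struct ext_counter {
            uint32_t hi;
            uint32_t lo;
    };

    /* Fold a new 32-bit raw reading into the 64-bit counter. 'old' remembers
     * the previous raw value, so wrap-around is absorbed by unsigned
     * subtraction; the carry check mirrors what ADD_EXTEND_64 does. */
    static void update_extend(struct ext_counter *c, uint32_t *old, uint32_t raw)
    {
            uint32_t diff = raw - *old;     /* well-defined for u32 wrap */

            *old = raw;
            c->lo += diff;
            if (c->lo < diff)               /* carry out of the low word */
                    c->hi++;
    }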