author    Sunil Goutham <sgoutham@cavium.com>      2016-11-15 07:08:16 -0500
committer David S. Miller <davem@davemloft.net>   2016-11-16 13:28:33 -0500
commit    964cb69bdc9db255f7c3a80f6e1bed8a25e4c60e
tree      2f07450b5aab178369483447eeadcc2a41d6e0a7
parent    cadcf95a4f70362c96a8fe39ff5d5df830d4db7f
net: thunderx: Fix VF driver's interface statistics
This patch fixes multiple issues:

1. Convert all driver statistics to percpu counters for accuracy.
2. To avoid multiple CQEs being posted by a TSO packet, the TSO packet's
   SQE has 'post_cqe' unset, and a dummy SQE is added to get the HW
   transmit completion notification. This dummy SQE has 'dont_send' set,
   so HW drops the packet it points to, and the Tx drop counter
   increases. This patch fixes this by subtracting the SW tx_tso counter
   from the HW Tx drop counter to get the actual packet drop count.
3. Reset all individual queue and VNIC HW stats when the interface goes
   down.
4. Get rid of unnecessary counters in the hot path.
5. Bring out all CQE error stats, i.e. both Rx and Tx.

Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
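A note on the percpu conversion in point 1: in the hot path each CPU now bumps only its own copy of the counters via this_cpu_inc(), so there is no locking and no cross-CPU cache-line bouncing, and readers sum every CPU's copy with for_each_possible_cpu()/per_cpu_ptr() when statistics are requested (see nicvf_get_ethtool_stats() below). A minimal userspace sketch of the same idea, with a plain array standing in for the kernel's percpu allocator; all names here are illustrative, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct drv_stats {
	uint64_t tx_tso;
};

/* one private copy per CPU; stands in for netdev_alloc_pcpu_stats() */
static struct drv_stats pcpu_stats[NR_CPUS];

/* hot path: each CPU touches only its own copy (kernel: this_cpu_inc) */
static void inc_tx_tso(int cpu)
{
	pcpu_stats[cpu].tx_tso++;
}

/* read side: sum all copies (kernel: for_each_possible_cpu + per_cpu_ptr) */
static uint64_t read_tx_tso(void)
{
	uint64_t sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += pcpu_stats[cpu].tx_tso;
	return sum;
}

int main(void)
{
	inc_tx_tso(0);
	inc_tx_tso(2);
	inc_tx_tso(2);
	printf("tx_tso = %llu\n", (unsigned long long)read_tx_tso());
	return 0;
}

Reads are O(nr_cpus) and may race with in-flight increments, which is acceptable for statistics; in exchange, the per-packet cost drops to a single uncontended increment.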
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h            |  61
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c       |   1
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c  | 105
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c     | 106
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c   |  96
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h   |  24
6 files changed, 197 insertions(+), 196 deletions(-)
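On point 3, the reset is implemented in nicvf_reset_rcv_queue_stats() (nicvf_queues.c below) by widening the NIC_MBOX_MSG_RESET_STAT_COUNTER mailbox request. A hedged reading of the new masks, assuming one bit per counter or queue (the diff itself does not spell this out): 0x3FFF covers 14 VF RX stat counters, 0x1F covers 5 VF TX stat counters, and 0xFFFF covers all 16 receive and send queues of a queue set. A quick width check using a gcc/clang builtin:

#include <stdio.h>

int main(void)
{
	/* masks sent in the reset_stat mailbox message below */
	const struct { const char *name; unsigned int mask; } m[] = {
		{ "rx_stat_mask", 0x3FFF },
		{ "tx_stat_mask", 0x1F   },
		{ "rq_stat_mask", 0xFFFF },
		{ "sq_stat_mask", 0xFFFF },
	};

	for (int i = 0; i < 4; i++)
		printf("%s = 0x%-4X -> %d bits set\n",
		       m[i].name, m[i].mask, __builtin_popcount(m[i].mask));
	return 0;
}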
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index cd2d379df5c5..86bd93ce2ea3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -178,11 +178,11 @@ enum tx_stats_reg_offset {
 
 struct nicvf_hw_stats {
 	u64 rx_bytes;
+	u64 rx_frames;
 	u64 rx_ucast_frames;
 	u64 rx_bcast_frames;
 	u64 rx_mcast_frames;
-	u64 rx_fcs_errors;
-	u64 rx_l2_errors;
+	u64 rx_drops;
 	u64 rx_drop_red;
 	u64 rx_drop_red_bytes;
 	u64 rx_drop_overrun;
@@ -191,6 +191,19 @@ struct nicvf_hw_stats {
 	u64 rx_drop_mcast;
 	u64 rx_drop_l3_bcast;
 	u64 rx_drop_l3_mcast;
+	u64 rx_fcs_errors;
+	u64 rx_l2_errors;
+
+	u64 tx_bytes;
+	u64 tx_frames;
+	u64 tx_ucast_frames;
+	u64 tx_bcast_frames;
+	u64 tx_mcast_frames;
+	u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+	/* CQE Rx errs */
 	u64 rx_bgx_truncated_pkts;
 	u64 rx_jabber_errs;
 	u64 rx_fcs_errs;
@@ -216,34 +229,30 @@ struct nicvf_hw_stats {
 	u64 rx_l4_pclp;
 	u64 rx_truncated_pkts;
 
-	u64 tx_bytes_ok;
-	u64 tx_ucast_frames_ok;
-	u64 tx_bcast_frames_ok;
-	u64 tx_mcast_frames_ok;
-	u64 tx_drops;
-};
-
-struct nicvf_drv_stats {
-	/* Rx */
-	u64 rx_frames_ok;
-	u64 rx_frames_64;
-	u64 rx_frames_127;
-	u64 rx_frames_255;
-	u64 rx_frames_511;
-	u64 rx_frames_1023;
-	u64 rx_frames_1518;
-	u64 rx_frames_jumbo;
-	u64 rx_drops;
-
+	/* CQE Tx errs */
+	u64 tx_desc_fault;
+	u64 tx_hdr_cons_err;
+	u64 tx_subdesc_err;
+	u64 tx_max_size_exceeded;
+	u64 tx_imm_size_oflow;
+	u64 tx_data_seq_err;
+	u64 tx_mem_seq_err;
+	u64 tx_lock_viol;
+	u64 tx_data_fault;
+	u64 tx_tstmp_conflict;
+	u64 tx_tstmp_timeout;
+	u64 tx_mem_fault;
+	u64 tx_csum_overlap;
+	u64 tx_csum_overflow;
+
+	/* driver debug stats */
 	u64 rcv_buffer_alloc_failures;
-
-	/* Tx */
-	u64 tx_frames_ok;
-	u64 tx_drops;
 	u64 tx_tso;
 	u64 tx_timeout;
 	u64 txq_stop;
 	u64 txq_wake;
+
+	struct u64_stats_sync syncp;
 };
 
 struct nicvf {
@@ -297,7 +306,7 @@ struct nicvf {
 
 	/* Stats */
 	struct nicvf_hw_stats hw_stats;
-	struct nicvf_drv_stats drv_stats;
+	struct nicvf_drv_stats __percpu *drv_stats;
 	struct bgx_stats bgx_stats;
 
 	/* MSI-X */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 85c9e6201e8b..6677b96e1f3f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -851,6 +851,7 @@ static int nic_reset_stat_counters(struct nicpf *nic,
 			nic_reg_write(nic, reg_addr, 0);
 		}
 	}
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index ad4fddb55421..432bf6be57cb 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -36,11 +36,11 @@ struct nicvf_stat {
 
 static const struct nicvf_stat nicvf_hw_stats[] = {
 	NICVF_HW_STAT(rx_bytes),
+	NICVF_HW_STAT(rx_frames),
 	NICVF_HW_STAT(rx_ucast_frames),
 	NICVF_HW_STAT(rx_bcast_frames),
 	NICVF_HW_STAT(rx_mcast_frames),
-	NICVF_HW_STAT(rx_fcs_errors),
-	NICVF_HW_STAT(rx_l2_errors),
+	NICVF_HW_STAT(rx_drops),
 	NICVF_HW_STAT(rx_drop_red),
 	NICVF_HW_STAT(rx_drop_red_bytes),
 	NICVF_HW_STAT(rx_drop_overrun),
@@ -49,50 +49,59 @@ static const struct nicvf_stat nicvf_hw_stats[] = {
 	NICVF_HW_STAT(rx_drop_mcast),
 	NICVF_HW_STAT(rx_drop_l3_bcast),
 	NICVF_HW_STAT(rx_drop_l3_mcast),
-	NICVF_HW_STAT(rx_bgx_truncated_pkts),
-	NICVF_HW_STAT(rx_jabber_errs),
-	NICVF_HW_STAT(rx_fcs_errs),
-	NICVF_HW_STAT(rx_bgx_errs),
-	NICVF_HW_STAT(rx_prel2_errs),
-	NICVF_HW_STAT(rx_l2_hdr_malformed),
-	NICVF_HW_STAT(rx_oversize),
-	NICVF_HW_STAT(rx_undersize),
-	NICVF_HW_STAT(rx_l2_len_mismatch),
-	NICVF_HW_STAT(rx_l2_pclp),
-	NICVF_HW_STAT(rx_ip_ver_errs),
-	NICVF_HW_STAT(rx_ip_csum_errs),
-	NICVF_HW_STAT(rx_ip_hdr_malformed),
-	NICVF_HW_STAT(rx_ip_payload_malformed),
-	NICVF_HW_STAT(rx_ip_ttl_errs),
-	NICVF_HW_STAT(rx_l3_pclp),
-	NICVF_HW_STAT(rx_l4_malformed),
-	NICVF_HW_STAT(rx_l4_csum_errs),
-	NICVF_HW_STAT(rx_udp_len_errs),
-	NICVF_HW_STAT(rx_l4_port_errs),
-	NICVF_HW_STAT(rx_tcp_flag_errs),
-	NICVF_HW_STAT(rx_tcp_offset_errs),
-	NICVF_HW_STAT(rx_l4_pclp),
-	NICVF_HW_STAT(rx_truncated_pkts),
-	NICVF_HW_STAT(tx_bytes_ok),
-	NICVF_HW_STAT(tx_ucast_frames_ok),
-	NICVF_HW_STAT(tx_bcast_frames_ok),
-	NICVF_HW_STAT(tx_mcast_frames_ok),
+	NICVF_HW_STAT(rx_fcs_errors),
+	NICVF_HW_STAT(rx_l2_errors),
+	NICVF_HW_STAT(tx_bytes),
+	NICVF_HW_STAT(tx_frames),
+	NICVF_HW_STAT(tx_ucast_frames),
+	NICVF_HW_STAT(tx_bcast_frames),
+	NICVF_HW_STAT(tx_mcast_frames),
+	NICVF_HW_STAT(tx_drops),
 };
 
 static const struct nicvf_stat nicvf_drv_stats[] = {
-	NICVF_DRV_STAT(rx_frames_ok),
-	NICVF_DRV_STAT(rx_frames_64),
-	NICVF_DRV_STAT(rx_frames_127),
-	NICVF_DRV_STAT(rx_frames_255),
-	NICVF_DRV_STAT(rx_frames_511),
-	NICVF_DRV_STAT(rx_frames_1023),
-	NICVF_DRV_STAT(rx_frames_1518),
-	NICVF_DRV_STAT(rx_frames_jumbo),
-	NICVF_DRV_STAT(rx_drops),
+	NICVF_DRV_STAT(rx_bgx_truncated_pkts),
+	NICVF_DRV_STAT(rx_jabber_errs),
+	NICVF_DRV_STAT(rx_fcs_errs),
+	NICVF_DRV_STAT(rx_bgx_errs),
+	NICVF_DRV_STAT(rx_prel2_errs),
+	NICVF_DRV_STAT(rx_l2_hdr_malformed),
+	NICVF_DRV_STAT(rx_oversize),
+	NICVF_DRV_STAT(rx_undersize),
+	NICVF_DRV_STAT(rx_l2_len_mismatch),
+	NICVF_DRV_STAT(rx_l2_pclp),
+	NICVF_DRV_STAT(rx_ip_ver_errs),
+	NICVF_DRV_STAT(rx_ip_csum_errs),
+	NICVF_DRV_STAT(rx_ip_hdr_malformed),
+	NICVF_DRV_STAT(rx_ip_payload_malformed),
+	NICVF_DRV_STAT(rx_ip_ttl_errs),
+	NICVF_DRV_STAT(rx_l3_pclp),
+	NICVF_DRV_STAT(rx_l4_malformed),
+	NICVF_DRV_STAT(rx_l4_csum_errs),
+	NICVF_DRV_STAT(rx_udp_len_errs),
+	NICVF_DRV_STAT(rx_l4_port_errs),
+	NICVF_DRV_STAT(rx_tcp_flag_errs),
+	NICVF_DRV_STAT(rx_tcp_offset_errs),
+	NICVF_DRV_STAT(rx_l4_pclp),
+	NICVF_DRV_STAT(rx_truncated_pkts),
+
+	NICVF_DRV_STAT(tx_desc_fault),
+	NICVF_DRV_STAT(tx_hdr_cons_err),
+	NICVF_DRV_STAT(tx_subdesc_err),
+	NICVF_DRV_STAT(tx_max_size_exceeded),
+	NICVF_DRV_STAT(tx_imm_size_oflow),
+	NICVF_DRV_STAT(tx_data_seq_err),
+	NICVF_DRV_STAT(tx_mem_seq_err),
+	NICVF_DRV_STAT(tx_lock_viol),
+	NICVF_DRV_STAT(tx_data_fault),
+	NICVF_DRV_STAT(tx_tstmp_conflict),
+	NICVF_DRV_STAT(tx_tstmp_timeout),
+	NICVF_DRV_STAT(tx_mem_fault),
+	NICVF_DRV_STAT(tx_csum_overlap),
+	NICVF_DRV_STAT(tx_csum_overflow),
+
 	NICVF_DRV_STAT(rcv_buffer_alloc_failures),
-	NICVF_DRV_STAT(tx_frames_ok),
 	NICVF_DRV_STAT(tx_tso),
-	NICVF_DRV_STAT(tx_drops),
 	NICVF_DRV_STAT(tx_timeout),
 	NICVF_DRV_STAT(txq_stop),
 	NICVF_DRV_STAT(txq_wake),
@@ -278,8 +287,8 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
 				    struct ethtool_stats *stats, u64 *data)
 {
 	struct nicvf *nic = netdev_priv(netdev);
-	int stat;
-	int sqs;
+	int stat, tmp_stats;
+	int sqs, cpu;
 
 	nicvf_update_stats(nic);
 
@@ -289,9 +298,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
 	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
 		*(data++) = ((u64 *)&nic->hw_stats)
 			    [nicvf_hw_stats[stat].index];
-	for (stat = 0; stat < nicvf_n_drv_stats; stat++)
-		*(data++) = ((u64 *)&nic->drv_stats)
-			    [nicvf_drv_stats[stat].index];
+	for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
+		tmp_stats = 0;
+		for_each_possible_cpu(cpu)
+			tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
+				     [nicvf_drv_stats[stat].index];
+		*(data++) = tmp_stats;
+	}
 
 	nicvf_get_qset_stats(nic, stats, &data);
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 8f833612da77..9dc79c0578d8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -69,25 +69,6 @@ static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 	return qidx;
 }
 
-static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
-					  struct sk_buff *skb)
-{
-	if (skb->len <= 64)
-		nic->drv_stats.rx_frames_64++;
-	else if (skb->len <= 127)
-		nic->drv_stats.rx_frames_127++;
-	else if (skb->len <= 255)
-		nic->drv_stats.rx_frames_255++;
-	else if (skb->len <= 511)
-		nic->drv_stats.rx_frames_511++;
-	else if (skb->len <= 1023)
-		nic->drv_stats.rx_frames_1023++;
-	else if (skb->len <= 1518)
-		nic->drv_stats.rx_frames_1518++;
-	else
-		nic->drv_stats.rx_frames_jumbo++;
-}
-
 /* The Cavium ThunderX network controller can *only* be found in SoCs
  * containing the ThunderX ARM64 CPU implementation. All accesses to the device
  * registers on this platform are implicitly strongly ordered with respect
@@ -514,7 +495,6 @@ static int nicvf_init_resources(struct nicvf *nic)
 }
 
 static void nicvf_snd_pkt_handler(struct net_device *netdev,
-				  struct cmp_queue *cq,
 				  struct cqe_send_t *cqe_tx,
 				  int cqe_type, int budget,
 				  unsigned int *tx_pkts, unsigned int *tx_bytes)
@@ -536,7 +516,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
 		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);
 
-	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+	nicvf_check_cqe_tx_errs(nic, cqe_tx);
 	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
 	if (skb) {
 		/* Check for dummy descriptor used for HW TSO offload on 88xx */
@@ -630,8 +610,6 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 		return;
 	}
 
-	nicvf_set_rx_frame_cnt(nic, skb);
-
 	nicvf_set_rxhash(netdev, cqe_rx, skb);
 
 	skb_record_rx_queue(skb, rq_idx);
@@ -703,7 +681,7 @@ loop:
 			work_done++;
 			break;
 		case CQE_TYPE_SEND:
-			nicvf_snd_pkt_handler(netdev, cq,
+			nicvf_snd_pkt_handler(netdev,
 					      (void *)cq_desc, CQE_TYPE_SEND,
 					      budget, &tx_pkts, &tx_bytes);
 			tx_done++;
@@ -740,7 +718,7 @@ done:
 		nic = nic->pnicvf;
 		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
 			netif_tx_start_queue(txq);
-			nic->drv_stats.txq_wake++;
+			this_cpu_inc(nic->drv_stats->txq_wake);
 			if (netif_msg_tx_err(nic))
 				netdev_warn(netdev,
 					    "%s: Transmit queue wakeup SQ%d\n",
@@ -1084,7 +1062,7 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
 		netif_tx_stop_queue(txq);
-		nic->drv_stats.txq_stop++;
+		this_cpu_inc(nic->drv_stats->txq_stop);
 		if (netif_msg_tx_err(nic))
 			netdev_warn(netdev,
 				    "%s: Transmit ring full, stopping SQ%d\n",
@@ -1202,7 +1180,7 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
 
 int nicvf_open(struct net_device *netdev)
 {
-	int err, qidx;
+	int cpu, err, qidx;
 	struct nicvf *nic = netdev_priv(netdev);
 	struct queue_set *qs = nic->qs;
 	struct nicvf_cq_poll *cq_poll = NULL;
@@ -1262,6 +1240,11 @@ int nicvf_open(struct net_device *netdev)
 		nicvf_rss_init(nic);
 		if (nicvf_update_hw_max_frs(nic, netdev->mtu))
 			goto cleanup;
+
+		/* Clear percpu stats */
+		for_each_possible_cpu(cpu)
+			memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
+			       sizeof(struct nicvf_drv_stats));
 	}
 
 	err = nicvf_register_interrupts(nic);
@@ -1288,9 +1271,6 @@ int nicvf_open(struct net_device *netdev)
 	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
 		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
 
-	nic->drv_stats.txq_stop = 0;
-	nic->drv_stats.txq_wake = 0;
-
 	return 0;
 cleanup:
 	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
@@ -1383,9 +1363,10 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
 
 void nicvf_update_stats(struct nicvf *nic)
 {
-	int qidx;
+	int qidx, cpu;
+	u64 tmp_stats = 0;
 	struct nicvf_hw_stats *stats = &nic->hw_stats;
-	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+	struct nicvf_drv_stats *drv_stats;
 	struct queue_set *qs = nic->qs;
 
 #define GET_RX_STATS(reg) \
@@ -1408,21 +1389,33 @@ void nicvf_update_stats(struct nicvf *nic)
 	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
 	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
 
-	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
-	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
-	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
-	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+	stats->tx_bytes = GET_TX_STATS(TX_OCTS);
+	stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
+	stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
+	stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
 	stats->tx_drops = GET_TX_STATS(TX_DROP);
 
-	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
-				  stats->tx_bcast_frames_ok +
-				  stats->tx_mcast_frames_ok;
-	drv_stats->rx_frames_ok = stats->rx_ucast_frames +
-				  stats->rx_bcast_frames +
-				  stats->rx_mcast_frames;
-	drv_stats->rx_drops = stats->rx_drop_red +
-			      stats->rx_drop_overrun;
-	drv_stats->tx_drops = stats->tx_drops;
+	/* On T88 pass 2.0, the dummy SQE added for TSO notification
+	 * via CQE has 'dont_send' set. Hence HW drops the packet
+	 * pointed to by the dummy SQE, and the tx_drops counter is
+	 * incremented for it. Subtracting it from the tx_tso counter
+	 * gives the exact tx_drops counter.
+	 */
+	if (nic->t88 && nic->hw_tso) {
+		for_each_possible_cpu(cpu) {
+			drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
+			tmp_stats += drv_stats->tx_tso;
+		}
+		stats->tx_drops = tmp_stats - stats->tx_drops;
+	}
+	stats->tx_frames = stats->tx_ucast_frames +
+			   stats->tx_bcast_frames +
+			   stats->tx_mcast_frames;
+	stats->rx_frames = stats->rx_ucast_frames +
+			   stats->rx_bcast_frames +
+			   stats->rx_mcast_frames;
+	stats->rx_drops = stats->rx_drop_red +
+			  stats->rx_drop_overrun;
 
 	/* Update RQ and SQ stats */
 	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
@@ -1436,18 +1429,17 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
 {
 	struct nicvf *nic = netdev_priv(netdev);
 	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
-	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 
 	nicvf_update_stats(nic);
 
 	stats->rx_bytes = hw_stats->rx_bytes;
-	stats->rx_packets = drv_stats->rx_frames_ok;
-	stats->rx_dropped = drv_stats->rx_drops;
+	stats->rx_packets = hw_stats->rx_frames;
+	stats->rx_dropped = hw_stats->rx_drops;
 	stats->multicast = hw_stats->rx_mcast_frames;
 
-	stats->tx_bytes = hw_stats->tx_bytes_ok;
-	stats->tx_packets = drv_stats->tx_frames_ok;
-	stats->tx_dropped = drv_stats->tx_drops;
+	stats->tx_bytes = hw_stats->tx_bytes;
+	stats->tx_packets = hw_stats->tx_frames;
+	stats->tx_dropped = hw_stats->tx_drops;
 
 	return stats;
 }
@@ -1460,7 +1452,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
 	netdev_warn(dev, "%s: Transmit timed out, resetting\n",
 		    dev->name);
 
-	nic->drv_stats.tx_timeout++;
+	this_cpu_inc(nic->drv_stats->tx_timeout);
 	schedule_work(&nic->reset_task);
 }
 
@@ -1594,6 +1586,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_free_netdev;
 	}
 
+	nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
+	if (!nic->drv_stats) {
+		err = -ENOMEM;
+		goto err_free_netdev;
+	}
+
 	err = nicvf_set_qset_resources(nic);
 	if (err)
 		goto err_free_netdev;
@@ -1652,6 +1650,8 @@ err_unregister_interrupts:
 	nicvf_unregister_interrupts(nic);
 err_free_netdev:
 	pci_set_drvdata(pdev, NULL);
+	if (nic->drv_stats)
+		free_percpu(nic->drv_stats);
 	free_netdev(netdev);
 err_release_regions:
 	pci_release_regions(pdev);
@@ -1679,6 +1679,8 @@ static void nicvf_remove(struct pci_dev *pdev)
 		unregister_netdev(pnetdev);
 	nicvf_unregister_interrupts(nic);
 	pci_set_drvdata(pdev, NULL);
+	if (nic->drv_stats)
+		free_percpu(nic->drv_stats);
 	free_netdev(netdev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index f914eef6573a..bdce5915baae 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -104,7 +104,8 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
 					   order);
 		if (!nic->rb_page) {
-			nic->drv_stats.rcv_buffer_alloc_failures++;
+			this_cpu_inc(nic->pnicvf->drv_stats->
+				     rcv_buffer_alloc_failures);
 			return -ENOMEM;
 		}
 		nic->rb_page_offset = 0;
@@ -483,9 +484,12 @@ static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
 {
 	union nic_mbx mbx = {};
 
-	/* Reset all RXQ's stats */
+	/* Reset all RQ/SQ and VF stats */
 	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+	mbx.reset_stat.rx_stat_mask = 0x3FFF;
+	mbx.reset_stat.tx_stat_mask = 0x1F;
 	mbx.reset_stat.rq_stat_mask = 0xFFFF;
+	mbx.reset_stat.sq_stat_mask = 0xFFFF;
 	nicvf_send_msg_to_pf(nic, &mbx);
 }
 
@@ -1032,7 +1036,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
 		/* For non-tunneled pkts, point this to L2 ethertype */
 		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
-		nic->drv_stats.tx_tso++;
+		this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
 	}
 }
 
@@ -1164,7 +1168,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
 
 	nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
 
-	nic->drv_stats.tx_tso++;
+	this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
 	return 1;
 }
 
@@ -1425,8 +1429,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
 /* Check for errors in the receive cmp.queue entry */
 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 {
-	struct nicvf_hw_stats *stats = &nic->hw_stats;
-
 	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
 		return 0;
 
@@ -1438,76 +1440,76 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 
 	switch (cqe_rx->err_opcode) {
 	case CQ_RX_ERROP_RE_PARTIAL:
-		stats->rx_bgx_truncated_pkts++;
+		this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
 		break;
 	case CQ_RX_ERROP_RE_JABBER:
-		stats->rx_jabber_errs++;
+		this_cpu_inc(nic->drv_stats->rx_jabber_errs);
 		break;
 	case CQ_RX_ERROP_RE_FCS:
-		stats->rx_fcs_errs++;
+		this_cpu_inc(nic->drv_stats->rx_fcs_errs);
 		break;
 	case CQ_RX_ERROP_RE_RX_CTL:
-		stats->rx_bgx_errs++;
+		this_cpu_inc(nic->drv_stats->rx_bgx_errs);
 		break;
 	case CQ_RX_ERROP_PREL2_ERR:
-		stats->rx_prel2_errs++;
+		this_cpu_inc(nic->drv_stats->rx_prel2_errs);
 		break;
 	case CQ_RX_ERROP_L2_MAL:
-		stats->rx_l2_hdr_malformed++;
+		this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
 		break;
 	case CQ_RX_ERROP_L2_OVERSIZE:
-		stats->rx_oversize++;
+		this_cpu_inc(nic->drv_stats->rx_oversize);
 		break;
 	case CQ_RX_ERROP_L2_UNDERSIZE:
-		stats->rx_undersize++;
+		this_cpu_inc(nic->drv_stats->rx_undersize);
 		break;
 	case CQ_RX_ERROP_L2_LENMISM:
-		stats->rx_l2_len_mismatch++;
+		this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
 		break;
 	case CQ_RX_ERROP_L2_PCLP:
-		stats->rx_l2_pclp++;
+		this_cpu_inc(nic->drv_stats->rx_l2_pclp);
 		break;
 	case CQ_RX_ERROP_IP_NOT:
-		stats->rx_ip_ver_errs++;
+		this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
 		break;
 	case CQ_RX_ERROP_IP_CSUM_ERR:
-		stats->rx_ip_csum_errs++;
+		this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
 		break;
 	case CQ_RX_ERROP_IP_MAL:
-		stats->rx_ip_hdr_malformed++;
+		this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
 		break;
 	case CQ_RX_ERROP_IP_MALD:
-		stats->rx_ip_payload_malformed++;
+		this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
 		break;
 	case CQ_RX_ERROP_IP_HOP:
-		stats->rx_ip_ttl_errs++;
+		this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
 		break;
 	case CQ_RX_ERROP_L3_PCLP:
-		stats->rx_l3_pclp++;
+		this_cpu_inc(nic->drv_stats->rx_l3_pclp);
 		break;
 	case CQ_RX_ERROP_L4_MAL:
-		stats->rx_l4_malformed++;
+		this_cpu_inc(nic->drv_stats->rx_l4_malformed);
 		break;
 	case CQ_RX_ERROP_L4_CHK:
-		stats->rx_l4_csum_errs++;
+		this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
 		break;
 	case CQ_RX_ERROP_UDP_LEN:
-		stats->rx_udp_len_errs++;
+		this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
 		break;
 	case CQ_RX_ERROP_L4_PORT:
-		stats->rx_l4_port_errs++;
+		this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
 		break;
 	case CQ_RX_ERROP_TCP_FLAG:
-		stats->rx_tcp_flag_errs++;
+		this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
 		break;
 	case CQ_RX_ERROP_TCP_OFFSET:
-		stats->rx_tcp_offset_errs++;
+		this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
 		break;
 	case CQ_RX_ERROP_L4_PCLP:
-		stats->rx_l4_pclp++;
+		this_cpu_inc(nic->drv_stats->rx_l4_pclp);
 		break;
 	case CQ_RX_ERROP_RBDR_TRUNC:
-		stats->rx_truncated_pkts++;
+		this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
 		break;
 	}
 
@@ -1515,56 +1517,52 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 }
 
 /* Check for errors in the send cmp.queue entry */
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
-			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
 {
-	struct cmp_queue_stats *stats = &cq->stats;
-
 	switch (cqe_tx->send_status) {
 	case CQ_TX_ERROP_GOOD:
-		stats->tx.good++;
 		return 0;
 	case CQ_TX_ERROP_DESC_FAULT:
-		stats->tx.desc_fault++;
+		this_cpu_inc(nic->drv_stats->tx_desc_fault);
 		break;
 	case CQ_TX_ERROP_HDR_CONS_ERR:
-		stats->tx.hdr_cons_err++;
+		this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
 		break;
 	case CQ_TX_ERROP_SUBDC_ERR:
-		stats->tx.subdesc_err++;
+		this_cpu_inc(nic->drv_stats->tx_subdesc_err);
 		break;
 	case CQ_TX_ERROP_MAX_SIZE_VIOL:
-		stats->tx.max_size_exceeded++;
+		this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
 		break;
 	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
-		stats->tx.imm_size_oflow++;
+		this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
 		break;
 	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
-		stats->tx.data_seq_err++;
+		this_cpu_inc(nic->drv_stats->tx_data_seq_err);
 		break;
 	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
-		stats->tx.mem_seq_err++;
+		this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
 		break;
 	case CQ_TX_ERROP_LOCK_VIOL:
-		stats->tx.lock_viol++;
+		this_cpu_inc(nic->drv_stats->tx_lock_viol);
 		break;
 	case CQ_TX_ERROP_DATA_FAULT:
-		stats->tx.data_fault++;
+		this_cpu_inc(nic->drv_stats->tx_data_fault);
 		break;
 	case CQ_TX_ERROP_TSTMP_CONFLICT:
-		stats->tx.tstmp_conflict++;
+		this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
 		break;
 	case CQ_TX_ERROP_TSTMP_TIMEOUT:
-		stats->tx.tstmp_timeout++;
+		this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
 		break;
 	case CQ_TX_ERROP_MEM_FAULT:
-		stats->tx.mem_fault++;
+		this_cpu_inc(nic->drv_stats->tx_mem_fault);
 		break;
 	case CQ_TX_ERROP_CK_OVERLAP:
-		stats->tx.csum_overlap++;
+		this_cpu_inc(nic->drv_stats->tx_csum_overlap);
 		break;
 	case CQ_TX_ERROP_CK_OFLOW:
-		stats->tx.csum_overflow++;
+		this_cpu_inc(nic->drv_stats->tx_csum_overflow);
 		break;
 	}
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8f4718edc0fe..2e3c940c1093 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -172,26 +172,6 @@ enum CQ_TX_ERROP_E {
 	CQ_TX_ERROP_ENUM_LAST = 0x8a,
 };
 
-struct cmp_queue_stats {
-	struct tx_stats {
-		u64 good;
-		u64 desc_fault;
-		u64 hdr_cons_err;
-		u64 subdesc_err;
-		u64 max_size_exceeded;
-		u64 imm_size_oflow;
-		u64 data_seq_err;
-		u64 mem_seq_err;
-		u64 lock_viol;
-		u64 data_fault;
-		u64 tstmp_conflict;
-		u64 tstmp_timeout;
-		u64 mem_fault;
-		u64 csum_overlap;
-		u64 csum_overflow;
-	} tx;
-} ____cacheline_aligned_in_smp;
-
 enum RQ_SQ_STATS {
 	RQ_SQ_STATS_OCTS,
 	RQ_SQ_STATS_PKTS,
@@ -243,7 +223,6 @@ struct cmp_queue {
 	spinlock_t lock;  /* lock to serialize processing CQEs */
 	void *desc;
 	struct q_desc_mem   dmem;
-	struct cmp_queue_stats stats;
 	int irq;
 } ____cacheline_aligned_in_smp;
 
@@ -338,6 +317,5 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
-			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
 #endif /* NICVF_QUEUES_H */