author		David S. Miller <davem@davemloft.net>	2016-11-16 13:28:34 -0500
committer	David S. Miller <davem@davemloft.net>	2016-11-16 13:28:34 -0500
commit		a7741713dd361f081e5b48c04f59d0bbb1f32ed3 (patch)
tree		163999dc96988c0b9b03c7227df37c79c9ed6ac6
parent		b71de936c38e80d1f059fd54d8704e9d86d6bd10 (diff)
parent		c94acf805d93e7beb5898ac97ff327ae0b6f04dd (diff)
Merge branch 'thunderx-fixes'
Sunil Goutham says:

====================
net: thunderx: Miscellaneous fixes

This patchset includes fixes for incorrect LMAC credits, unreliable
driver statistics, a memory leak upon interface down, etc.

Changes from v1:
- As suggested, replaced bit shifting with the BIT() macro in the patch
  'Fix configuration of L3/L4 length checking'.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
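For readers unfamiliar with the "unreliable driver statistics" fix referenced above: the series converts nicvf_drv_stats to per-CPU counters so hot paths can bump them without races, and readers fold the per-CPU copies when reporting. The sketch below is only an illustration of that general pattern under assumed example_* names (not the driver's actual structures); the kernel helpers shown (netdev_alloc_pcpu_stats, this_cpu_inc, per_cpu_ptr, for_each_possible_cpu, free_percpu) are the same ones used in the diffs that follow.

/* Minimal sketch of the per-CPU counter pattern used by the statistics
 * fix below; the example_* names are illustrative, not from the driver.
 */
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct example_drv_stats {
	u64 tx_tso;
	u64 txq_stop;
	struct u64_stats_sync syncp;	/* required by netdev_alloc_pcpu_stats() */
};

struct example_priv {
	struct example_drv_stats __percpu *drv_stats;
};

static int example_alloc_stats(struct example_priv *p)
{
	/* Allocates one zeroed copy per CPU and initialises each syncp */
	p->drv_stats = netdev_alloc_pcpu_stats(struct example_drv_stats);
	return p->drv_stats ? 0 : -ENOMEM;
}

static void example_count_tso(struct example_priv *p)
{
	/* Fast path: lock-free increment of this CPU's copy */
	this_cpu_inc(p->drv_stats->tx_tso);
}

static u64 example_read_tx_tso(struct example_priv *p)
{
	u64 sum = 0;
	int cpu;

	/* Slow path (ethtool / ndo_get_stats64): fold all per-CPU copies */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(p->drv_stats, cpu)->tx_tso;
	return sum;
}

static void example_free_stats(struct example_priv *p)
{
	free_percpu(p->drv_stats);
}

The actual driver changes (allocation in nicvf_probe, increments in the datapath, folding in ethtool and get_stats64) appear in the diffs below.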
-rw-r--r--   drivers/net/ethernet/cavium/thunder/nic.h            |  64
-rw-r--r--   drivers/net/ethernet/cavium/thunder/nic_main.c       |  37
-rw-r--r--   drivers/net/ethernet/cavium/thunder/nic_reg.h        |   1
-rw-r--r--   drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c  | 105
-rw-r--r--   drivers/net/ethernet/cavium/thunder/nicvf_main.c     | 153
-rw-r--r--   drivers/net/ethernet/cavium/thunder/nicvf_queues.c   | 118
-rw-r--r--   drivers/net/ethernet/cavium/thunder/nicvf_queues.h   |  24
-rw-r--r--   drivers/net/ethernet/cavium/thunder/thunder_bgx.c    |   4
-rw-r--r--   drivers/net/ethernet/cavium/thunder/thunder_bgx.h    |   2
9 files changed, 274 insertions, 234 deletions
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 30426109711c..86bd93ce2ea3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -47,7 +47,7 @@
 
 /* Min/Max packet size */
 #define	NIC_HW_MIN_FRS			64
-#define	NIC_HW_MAX_FRS			9200 /* 9216 max packet including FCS */
+#define	NIC_HW_MAX_FRS			9190 /* Excluding L2 header and FCS */
 
 /* Max pkinds */
 #define	NIC_MAX_PKIND			16
@@ -178,11 +178,11 @@ enum tx_stats_reg_offset {
 
 struct nicvf_hw_stats {
 	u64 rx_bytes;
+	u64 rx_frames;
 	u64 rx_ucast_frames;
 	u64 rx_bcast_frames;
 	u64 rx_mcast_frames;
-	u64 rx_fcs_errors;
-	u64 rx_l2_errors;
+	u64 rx_drops;
 	u64 rx_drop_red;
 	u64 rx_drop_red_bytes;
 	u64 rx_drop_overrun;
@@ -191,6 +191,19 @@ struct nicvf_hw_stats {
 	u64 rx_drop_mcast;
 	u64 rx_drop_l3_bcast;
 	u64 rx_drop_l3_mcast;
+	u64 rx_fcs_errors;
+	u64 rx_l2_errors;
+
+	u64 tx_bytes;
+	u64 tx_frames;
+	u64 tx_ucast_frames;
+	u64 tx_bcast_frames;
+	u64 tx_mcast_frames;
+	u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+	/* CQE Rx errs */
 	u64 rx_bgx_truncated_pkts;
 	u64 rx_jabber_errs;
 	u64 rx_fcs_errs;
@@ -216,34 +229,30 @@ struct nicvf_hw_stats {
 	u64 rx_l4_pclp;
 	u64 rx_truncated_pkts;
 
-	u64 tx_bytes_ok;
-	u64 tx_ucast_frames_ok;
-	u64 tx_bcast_frames_ok;
-	u64 tx_mcast_frames_ok;
-	u64 tx_drops;
-};
-
-struct nicvf_drv_stats {
-	/* Rx */
-	u64 rx_frames_ok;
-	u64 rx_frames_64;
-	u64 rx_frames_127;
-	u64 rx_frames_255;
-	u64 rx_frames_511;
-	u64 rx_frames_1023;
-	u64 rx_frames_1518;
-	u64 rx_frames_jumbo;
-	u64 rx_drops;
-
+	/* CQE Tx errs */
+	u64 tx_desc_fault;
+	u64 tx_hdr_cons_err;
+	u64 tx_subdesc_err;
+	u64 tx_max_size_exceeded;
+	u64 tx_imm_size_oflow;
+	u64 tx_data_seq_err;
+	u64 tx_mem_seq_err;
+	u64 tx_lock_viol;
+	u64 tx_data_fault;
+	u64 tx_tstmp_conflict;
+	u64 tx_tstmp_timeout;
+	u64 tx_mem_fault;
+	u64 tx_csum_overlap;
+	u64 tx_csum_overflow;
+
+	/* driver debug stats */
 	u64 rcv_buffer_alloc_failures;
-
-	/* Tx */
-	u64 tx_frames_ok;
-	u64 tx_drops;
 	u64 tx_tso;
 	u64 tx_timeout;
 	u64 txq_stop;
 	u64 txq_wake;
+
+	struct u64_stats_sync syncp;
 };
 
 struct nicvf {
@@ -282,7 +291,6 @@ struct nicvf {
 
 	u8			node;
 	u8			cpi_alg;
-	u16			mtu;
 	bool			link_up;
 	u8			duplex;
 	u32			speed;
@@ -298,7 +306,7 @@ struct nicvf {
 
 	/* Stats */
 	struct nicvf_hw_stats   hw_stats;
-	struct nicvf_drv_stats  drv_stats;
+	struct nicvf_drv_stats  __percpu *drv_stats;
 	struct bgx_stats	bgx_stats;
 
 	/* MSI-X  */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 2bbf4cbf08b2..6677b96e1f3f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -11,6 +11,7 @@
 #include <linux/pci.h>
 #include <linux/etherdevice.h>
 #include <linux/of.h>
+#include <linux/if_vlan.h>
 
 #include "nic_reg.h"
 #include "nic.h"
@@ -260,18 +261,31 @@ static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
 /* Update hardware min/max frame size */
 static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
 {
-	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
-		dev_err(&nic->pdev->dev,
-			"Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
-			vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
+	int bgx, lmac, lmac_cnt;
+	u64 lmac_credits;
+
+	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS))
 		return 1;
-	}
-	new_frs += ETH_HLEN;
-	if (new_frs <= nic->pkind.maxlen)
-		return 0;
 
-	nic->pkind.maxlen = new_frs;
-	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+	lmac += bgx * MAX_LMAC_PER_BGX;
+
+	new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
+
+	/* Update corresponding LMAC credits */
+	lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+	lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8));
+	lmac_credits &= ~(0xFFFFFULL << 12);
+	lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12);
+	nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits);
+
+	/* Enforce MTU in HW
+	 * This config is supported only from 88xx pass 2.0 onwards.
+	 */
+	if (!pass1_silicon(nic->pdev))
+		nic_reg_write(nic,
+			      NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs);
 	return 0;
 }
 
@@ -464,7 +478,7 @@ static int nic_init_hw(struct nicpf *nic)
 
 	/* PKIND configuration */
 	nic->pkind.minlen = 0;
-	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+	nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
 	nic->pkind.lenerr_en = 1;
 	nic->pkind.rx_hdr = 0;
 	nic->pkind.hdr_sl = 0;
@@ -837,6 +851,7 @@ static int nic_reset_stat_counters(struct nicpf *nic,
 			nic_reg_write(nic, reg_addr, 0);
 		}
 	}
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index edf779f5a227..80d46337cf29 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -106,6 +106,7 @@
 #define NIC_PF_MPI_0_2047_CFG			(0x210000)
 #define NIC_PF_RSSI_0_4097_RQ			(0x220000)
 #define NIC_PF_LMAC_0_7_CFG			(0x240000)
+#define NIC_PF_LMAC_0_7_CFG2			(0x240100)
 #define NIC_PF_LMAC_0_7_SW_XOFF			(0x242000)
 #define NIC_PF_LMAC_0_7_CREDIT			(0x244000)
 #define NIC_PF_CHAN_0_255_TX_CFG		(0x400000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index ad4fddb55421..432bf6be57cb 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -36,11 +36,11 @@ struct nicvf_stat {
 
 static const struct nicvf_stat nicvf_hw_stats[] = {
 	NICVF_HW_STAT(rx_bytes),
+	NICVF_HW_STAT(rx_frames),
 	NICVF_HW_STAT(rx_ucast_frames),
 	NICVF_HW_STAT(rx_bcast_frames),
 	NICVF_HW_STAT(rx_mcast_frames),
-	NICVF_HW_STAT(rx_fcs_errors),
-	NICVF_HW_STAT(rx_l2_errors),
+	NICVF_HW_STAT(rx_drops),
 	NICVF_HW_STAT(rx_drop_red),
 	NICVF_HW_STAT(rx_drop_red_bytes),
 	NICVF_HW_STAT(rx_drop_overrun),
@@ -49,50 +49,59 @@ static const struct nicvf_stat nicvf_hw_stats[] = {
 	NICVF_HW_STAT(rx_drop_mcast),
 	NICVF_HW_STAT(rx_drop_l3_bcast),
 	NICVF_HW_STAT(rx_drop_l3_mcast),
-	NICVF_HW_STAT(rx_bgx_truncated_pkts),
-	NICVF_HW_STAT(rx_jabber_errs),
-	NICVF_HW_STAT(rx_fcs_errs),
-	NICVF_HW_STAT(rx_bgx_errs),
-	NICVF_HW_STAT(rx_prel2_errs),
-	NICVF_HW_STAT(rx_l2_hdr_malformed),
-	NICVF_HW_STAT(rx_oversize),
-	NICVF_HW_STAT(rx_undersize),
-	NICVF_HW_STAT(rx_l2_len_mismatch),
-	NICVF_HW_STAT(rx_l2_pclp),
-	NICVF_HW_STAT(rx_ip_ver_errs),
-	NICVF_HW_STAT(rx_ip_csum_errs),
-	NICVF_HW_STAT(rx_ip_hdr_malformed),
-	NICVF_HW_STAT(rx_ip_payload_malformed),
-	NICVF_HW_STAT(rx_ip_ttl_errs),
-	NICVF_HW_STAT(rx_l3_pclp),
-	NICVF_HW_STAT(rx_l4_malformed),
-	NICVF_HW_STAT(rx_l4_csum_errs),
-	NICVF_HW_STAT(rx_udp_len_errs),
-	NICVF_HW_STAT(rx_l4_port_errs),
-	NICVF_HW_STAT(rx_tcp_flag_errs),
-	NICVF_HW_STAT(rx_tcp_offset_errs),
-	NICVF_HW_STAT(rx_l4_pclp),
-	NICVF_HW_STAT(rx_truncated_pkts),
-	NICVF_HW_STAT(tx_bytes_ok),
-	NICVF_HW_STAT(tx_ucast_frames_ok),
-	NICVF_HW_STAT(tx_bcast_frames_ok),
-	NICVF_HW_STAT(tx_mcast_frames_ok),
+	NICVF_HW_STAT(rx_fcs_errors),
+	NICVF_HW_STAT(rx_l2_errors),
+	NICVF_HW_STAT(tx_bytes),
+	NICVF_HW_STAT(tx_frames),
+	NICVF_HW_STAT(tx_ucast_frames),
+	NICVF_HW_STAT(tx_bcast_frames),
+	NICVF_HW_STAT(tx_mcast_frames),
+	NICVF_HW_STAT(tx_drops),
 };
 
 static const struct nicvf_stat nicvf_drv_stats[] = {
-	NICVF_DRV_STAT(rx_frames_ok),
-	NICVF_DRV_STAT(rx_frames_64),
-	NICVF_DRV_STAT(rx_frames_127),
-	NICVF_DRV_STAT(rx_frames_255),
-	NICVF_DRV_STAT(rx_frames_511),
-	NICVF_DRV_STAT(rx_frames_1023),
-	NICVF_DRV_STAT(rx_frames_1518),
-	NICVF_DRV_STAT(rx_frames_jumbo),
-	NICVF_DRV_STAT(rx_drops),
+	NICVF_DRV_STAT(rx_bgx_truncated_pkts),
+	NICVF_DRV_STAT(rx_jabber_errs),
+	NICVF_DRV_STAT(rx_fcs_errs),
+	NICVF_DRV_STAT(rx_bgx_errs),
+	NICVF_DRV_STAT(rx_prel2_errs),
+	NICVF_DRV_STAT(rx_l2_hdr_malformed),
+	NICVF_DRV_STAT(rx_oversize),
+	NICVF_DRV_STAT(rx_undersize),
+	NICVF_DRV_STAT(rx_l2_len_mismatch),
+	NICVF_DRV_STAT(rx_l2_pclp),
+	NICVF_DRV_STAT(rx_ip_ver_errs),
+	NICVF_DRV_STAT(rx_ip_csum_errs),
+	NICVF_DRV_STAT(rx_ip_hdr_malformed),
+	NICVF_DRV_STAT(rx_ip_payload_malformed),
+	NICVF_DRV_STAT(rx_ip_ttl_errs),
+	NICVF_DRV_STAT(rx_l3_pclp),
+	NICVF_DRV_STAT(rx_l4_malformed),
+	NICVF_DRV_STAT(rx_l4_csum_errs),
+	NICVF_DRV_STAT(rx_udp_len_errs),
+	NICVF_DRV_STAT(rx_l4_port_errs),
+	NICVF_DRV_STAT(rx_tcp_flag_errs),
+	NICVF_DRV_STAT(rx_tcp_offset_errs),
+	NICVF_DRV_STAT(rx_l4_pclp),
+	NICVF_DRV_STAT(rx_truncated_pkts),
+
+	NICVF_DRV_STAT(tx_desc_fault),
+	NICVF_DRV_STAT(tx_hdr_cons_err),
+	NICVF_DRV_STAT(tx_subdesc_err),
+	NICVF_DRV_STAT(tx_max_size_exceeded),
+	NICVF_DRV_STAT(tx_imm_size_oflow),
+	NICVF_DRV_STAT(tx_data_seq_err),
+	NICVF_DRV_STAT(tx_mem_seq_err),
+	NICVF_DRV_STAT(tx_lock_viol),
+	NICVF_DRV_STAT(tx_data_fault),
+	NICVF_DRV_STAT(tx_tstmp_conflict),
+	NICVF_DRV_STAT(tx_tstmp_timeout),
+	NICVF_DRV_STAT(tx_mem_fault),
+	NICVF_DRV_STAT(tx_csum_overlap),
+	NICVF_DRV_STAT(tx_csum_overflow),
+
 	NICVF_DRV_STAT(rcv_buffer_alloc_failures),
-	NICVF_DRV_STAT(tx_frames_ok),
 	NICVF_DRV_STAT(tx_tso),
-	NICVF_DRV_STAT(tx_drops),
 	NICVF_DRV_STAT(tx_timeout),
 	NICVF_DRV_STAT(txq_stop),
 	NICVF_DRV_STAT(txq_wake),
@@ -278,8 +287,8 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
 				    struct ethtool_stats *stats, u64 *data)
 {
 	struct nicvf *nic = netdev_priv(netdev);
-	int stat;
-	int sqs;
+	int stat, tmp_stats;
+	int sqs, cpu;
 
 	nicvf_update_stats(nic);
 
@@ -289,9 +298,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
 	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
 		*(data++) = ((u64 *)&nic->hw_stats)
 				[nicvf_hw_stats[stat].index];
-	for (stat = 0; stat < nicvf_n_drv_stats; stat++)
-		*(data++) = ((u64 *)&nic->drv_stats)
-				[nicvf_drv_stats[stat].index];
+	for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
+		tmp_stats = 0;
+		for_each_possible_cpu(cpu)
+			tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
+				     [nicvf_drv_stats[stat].index];
+		*(data++) = tmp_stats;
+	}
 
 	nicvf_get_qset_stats(nic, stats, &data);
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 45a13f718863..8a37012c9c89 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -69,25 +69,6 @@ static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 	return qidx;
 }
 
-static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
-					  struct sk_buff *skb)
-{
-	if (skb->len <= 64)
-		nic->drv_stats.rx_frames_64++;
-	else if (skb->len <= 127)
-		nic->drv_stats.rx_frames_127++;
-	else if (skb->len <= 255)
-		nic->drv_stats.rx_frames_255++;
-	else if (skb->len <= 511)
-		nic->drv_stats.rx_frames_511++;
-	else if (skb->len <= 1023)
-		nic->drv_stats.rx_frames_1023++;
-	else if (skb->len <= 1518)
-		nic->drv_stats.rx_frames_1518++;
-	else
-		nic->drv_stats.rx_frames_jumbo++;
-}
-
 /* The Cavium ThunderX network controller can *only* be found in SoCs
  * containing the ThunderX ARM64 CPU implementation. All accesses to the device
  * registers on this platform are implicitly strongly ordered with respect
@@ -492,9 +473,6 @@ int nicvf_set_real_num_queues(struct net_device *netdev,
 static int nicvf_init_resources(struct nicvf *nic)
 {
 	int err;
-	union nic_mbx mbx = {};
-
-	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
 
 	/* Enable Qset */
 	nicvf_qset_config(nic, true);
@@ -507,14 +485,10 @@ static int nicvf_init_resources(struct nicvf *nic)
 		return err;
 	}
 
-	/* Send VF config done msg to PF */
-	nicvf_write_to_mbx(nic, &mbx);
-
 	return 0;
 }
 
 static void nicvf_snd_pkt_handler(struct net_device *netdev,
-				  struct cmp_queue *cq,
 				  struct cqe_send_t *cqe_tx,
 				  int cqe_type, int budget,
 				  unsigned int *tx_pkts, unsigned int *tx_bytes)
@@ -536,7 +510,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
 		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);
 
-	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+	nicvf_check_cqe_tx_errs(nic, cqe_tx);
 	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
 	if (skb) {
 		/* Check for dummy descriptor used for HW TSO offload on 88xx */
@@ -630,8 +604,6 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 		return;
 	}
 
-	nicvf_set_rx_frame_cnt(nic, skb);
-
 	nicvf_set_rxhash(netdev, cqe_rx, skb);
 
 	skb_record_rx_queue(skb, rq_idx);
@@ -703,7 +675,7 @@ loop:
 			work_done++;
 			break;
 		case CQE_TYPE_SEND:
-			nicvf_snd_pkt_handler(netdev, cq,
+			nicvf_snd_pkt_handler(netdev,
 					      (void *)cq_desc, CQE_TYPE_SEND,
 					      budget, &tx_pkts, &tx_bytes);
 			tx_done++;
@@ -740,7 +712,7 @@ done:
 		nic = nic->pnicvf;
 		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
 			netif_tx_start_queue(txq);
-			nic->drv_stats.txq_wake++;
+			this_cpu_inc(nic->drv_stats->txq_wake);
 			if (netif_msg_tx_err(nic))
 				netdev_warn(netdev,
 					    "%s: Transmit queue wakeup SQ%d\n",
@@ -1084,7 +1056,7 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
 		netif_tx_stop_queue(txq);
-		nic->drv_stats.txq_stop++;
+		this_cpu_inc(nic->drv_stats->txq_stop);
 		if (netif_msg_tx_err(nic))
 			netdev_warn(netdev,
 				    "%s: Transmit ring full, stopping SQ%d\n",
@@ -1189,14 +1161,24 @@ int nicvf_stop(struct net_device *netdev)
 	return 0;
 }
 
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+	union nic_mbx mbx = {};
+
+	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+	mbx.frs.max_frs = mtu;
+	mbx.frs.vf_id = nic->vf_id;
+
+	return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
 int nicvf_open(struct net_device *netdev)
 {
-	int err, qidx;
+	int cpu, err, qidx;
 	struct nicvf *nic = netdev_priv(netdev);
 	struct queue_set *qs = nic->qs;
 	struct nicvf_cq_poll *cq_poll = NULL;
-
-	nic->mtu = netdev->mtu;
+	union nic_mbx mbx = {};
 
 	netif_carrier_off(netdev);
 
@@ -1248,9 +1230,17 @@ int nicvf_open(struct net_device *netdev)
 	if (nic->sqs_mode)
 		nicvf_get_primary_vf_struct(nic);
 
-	/* Configure receive side scaling */
-	if (!nic->sqs_mode)
+	/* Configure receive side scaling and MTU */
+	if (!nic->sqs_mode) {
 		nicvf_rss_init(nic);
+		if (nicvf_update_hw_max_frs(nic, netdev->mtu))
+			goto cleanup;
+
+		/* Clear percpu stats */
+		for_each_possible_cpu(cpu)
+			memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
+			       sizeof(struct nicvf_drv_stats));
+	}
 
 	err = nicvf_register_interrupts(nic);
 	if (err)
@@ -1276,8 +1266,9 @@ int nicvf_open(struct net_device *netdev)
 	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
 		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
 
-	nic->drv_stats.txq_stop = 0;
-	nic->drv_stats.txq_wake = 0;
+	/* Send VF config done msg to PF */
+	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+	nicvf_write_to_mbx(nic, &mbx);
 
 	return 0;
 cleanup:
@@ -1297,17 +1288,6 @@ napi_del:
 	return err;
 }
 
-static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
-{
-	union nic_mbx mbx = {};
-
-	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
-	mbx.frs.max_frs = mtu;
-	mbx.frs.vf_id = nic->vf_id;
-
-	return nicvf_send_msg_to_pf(nic, &mbx);
-}
-
 static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct nicvf *nic = netdev_priv(netdev);
@@ -1318,10 +1298,13 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
 	if (new_mtu < NIC_HW_MIN_FRS)
 		return -EINVAL;
 
+	netdev->mtu = new_mtu;
+
+	if (!netif_running(netdev))
+		return 0;
+
 	if (nicvf_update_hw_max_frs(nic, new_mtu))
 		return -EINVAL;
-	netdev->mtu = new_mtu;
-	nic->mtu = new_mtu;
 
 	return 0;
 }
@@ -1379,9 +1362,10 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
 
 void nicvf_update_stats(struct nicvf *nic)
 {
-	int qidx;
+	int qidx, cpu;
+	u64 tmp_stats = 0;
 	struct nicvf_hw_stats *stats = &nic->hw_stats;
-	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+	struct nicvf_drv_stats *drv_stats;
 	struct queue_set *qs = nic->qs;
 
 #define GET_RX_STATS(reg) \
@@ -1404,21 +1388,33 @@ void nicvf_update_stats(struct nicvf *nic)
 	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
 	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
 
-	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
-	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
-	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
-	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+	stats->tx_bytes = GET_TX_STATS(TX_OCTS);
+	stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
+	stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
+	stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
 	stats->tx_drops = GET_TX_STATS(TX_DROP);
 
-	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
-				  stats->tx_bcast_frames_ok +
-				  stats->tx_mcast_frames_ok;
-	drv_stats->rx_frames_ok = stats->rx_ucast_frames +
-				  stats->rx_bcast_frames +
-				  stats->rx_mcast_frames;
-	drv_stats->rx_drops = stats->rx_drop_red +
-			      stats->rx_drop_overrun;
-	drv_stats->tx_drops = stats->tx_drops;
+	/* On T88 pass 2.0, the dummy SQE added for TSO notification
+	 * via CQE has 'dont_send' set. Hence HW drops the pkt pointed
+	 * pointed by dummy SQE and results in tx_drops counter being
+	 * incremented. Subtracting it from tx_tso counter will give
+	 * exact tx_drops counter.
+	 */
+	if (nic->t88 && nic->hw_tso) {
+		for_each_possible_cpu(cpu) {
+			drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
+			tmp_stats += drv_stats->tx_tso;
+		}
+		stats->tx_drops = tmp_stats - stats->tx_drops;
+	}
+	stats->tx_frames = stats->tx_ucast_frames +
+			   stats->tx_bcast_frames +
+			   stats->tx_mcast_frames;
+	stats->rx_frames = stats->rx_ucast_frames +
+			   stats->rx_bcast_frames +
+			   stats->rx_mcast_frames;
+	stats->rx_drops = stats->rx_drop_red +
+			  stats->rx_drop_overrun;
 
 	/* Update RQ and SQ stats */
 	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
@@ -1432,18 +1428,17 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
 {
 	struct nicvf *nic = netdev_priv(netdev);
 	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
-	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 
 	nicvf_update_stats(nic);
 
 	stats->rx_bytes = hw_stats->rx_bytes;
-	stats->rx_packets = drv_stats->rx_frames_ok;
-	stats->rx_dropped = drv_stats->rx_drops;
+	stats->rx_packets = hw_stats->rx_frames;
+	stats->rx_dropped = hw_stats->rx_drops;
 	stats->multicast = hw_stats->rx_mcast_frames;
 
-	stats->tx_bytes = hw_stats->tx_bytes_ok;
-	stats->tx_packets = drv_stats->tx_frames_ok;
-	stats->tx_dropped = drv_stats->tx_drops;
+	stats->tx_bytes = hw_stats->tx_bytes;
+	stats->tx_packets = hw_stats->tx_frames;
+	stats->tx_dropped = hw_stats->tx_drops;
 
 	return stats;
 }
@@ -1456,7 +1451,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
 	netdev_warn(dev, "%s: Transmit timed out, resetting\n",
 		    dev->name);
 
-	nic->drv_stats.tx_timeout++;
+	this_cpu_inc(nic->drv_stats->tx_timeout);
 	schedule_work(&nic->reset_task);
 }
 
@@ -1590,6 +1585,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_free_netdev;
 	}
 
+	nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
+	if (!nic->drv_stats) {
+		err = -ENOMEM;
+		goto err_free_netdev;
+	}
+
 	err = nicvf_set_qset_resources(nic);
 	if (err)
 		goto err_free_netdev;
@@ -1648,6 +1649,8 @@ err_unregister_interrupts:
 	nicvf_unregister_interrupts(nic);
 err_free_netdev:
 	pci_set_drvdata(pdev, NULL);
+	if (nic->drv_stats)
+		free_percpu(nic->drv_stats);
 	free_netdev(netdev);
 err_release_regions:
 	pci_release_regions(pdev);
@@ -1675,6 +1678,8 @@ static void nicvf_remove(struct pci_dev *pdev)
 	unregister_netdev(pnetdev);
 	nicvf_unregister_interrupts(nic);
 	pci_set_drvdata(pdev, NULL);
+	if (nic->drv_stats)
+		free_percpu(nic->drv_stats);
 	free_netdev(netdev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a4fc50155881..747ef0882976 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -104,7 +104,8 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
 					   order);
 		if (!nic->rb_page) {
-			nic->drv_stats.rcv_buffer_alloc_failures++;
+			this_cpu_inc(nic->pnicvf->drv_stats->
+				     rcv_buffer_alloc_failures);
 			return -ENOMEM;
 		}
 		nic->rb_page_offset = 0;
@@ -270,7 +271,8 @@ refill:
 			      rbdr_idx, new_rb);
 next_rbdr:
 	/* Re-enable RBDR interrupts only if buffer allocation is success */
-	if (!nic->rb_alloc_fail && rbdr->enable)
+	if (!nic->rb_alloc_fail && rbdr->enable &&
+	    netif_running(nic->pnicvf->netdev))
 		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
 
 	if (rbdr_idx)
@@ -361,6 +363,8 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
 
 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 {
+	struct sk_buff *skb;
+
 	if (!sq)
 		return;
 	if (!sq->dmem.base)
@@ -371,6 +375,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 				  sq->dmem.q_len * TSO_HEADER_SIZE,
 				  sq->tso_hdrs, sq->tso_hdrs_phys);
 
+	/* Free pending skbs in the queue */
+	smp_rmb();
+	while (sq->head != sq->tail) {
+		skb = (struct sk_buff *)sq->skbuff[sq->head];
+		if (skb)
+			dev_kfree_skb_any(skb);
+		sq->head++;
+		sq->head &= (sq->dmem.q_len - 1);
+	}
 	kfree(sq->skbuff);
 	nicvf_free_q_desc_mem(nic, &sq->dmem);
 }
@@ -483,9 +496,12 @@ static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
 {
 	union nic_mbx mbx = {};
 
-	/* Reset all RXQ's stats */
+	/* Reset all RQ/SQ and VF stats */
 	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+	mbx.reset_stat.rx_stat_mask = 0x3FFF;
+	mbx.reset_stat.tx_stat_mask = 0x1F;
 	mbx.reset_stat.rq_stat_mask = 0xFFFF;
+	mbx.reset_stat.sq_stat_mask = 0xFFFF;
 	nicvf_send_msg_to_pf(nic, &mbx);
 }
 
@@ -538,9 +554,12 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
 	nicvf_send_msg_to_pf(nic, &mbx);
 
-	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
-	if (!nic->sqs_mode)
+	if (!nic->sqs_mode && (qidx == 0)) {
+		/* Enable checking L3/L4 length and TCP/UDP checksums */
+		nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
+				      (BIT(24) | BIT(23) | BIT(21)));
 		nicvf_config_vlan_stripping(nic, nic->netdev->features);
+	}
 
 	/* Enable Receive queue */
 	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
@@ -1029,7 +1048,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
 		/* For non-tunneled pkts, point this to L2 ethertype */
 		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
-		nic->drv_stats.tx_tso++;
+		this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
 	}
 }
 
@@ -1161,7 +1180,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
 
 	nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
 
-	nic->drv_stats.tx_tso++;
+	this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
 	return 1;
 }
 
@@ -1422,8 +1441,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
 /* Check for errors in the receive cmp.queue entry */
 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 {
-	struct nicvf_hw_stats *stats = &nic->hw_stats;
-
 	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
 		return 0;
 
@@ -1435,76 +1452,76 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 
 	switch (cqe_rx->err_opcode) {
 	case CQ_RX_ERROP_RE_PARTIAL:
-		stats->rx_bgx_truncated_pkts++;
+		this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
 		break;
 	case CQ_RX_ERROP_RE_JABBER:
-		stats->rx_jabber_errs++;
+		this_cpu_inc(nic->drv_stats->rx_jabber_errs);
 		break;
 	case CQ_RX_ERROP_RE_FCS:
-		stats->rx_fcs_errs++;
+		this_cpu_inc(nic->drv_stats->rx_fcs_errs);
 		break;
 	case CQ_RX_ERROP_RE_RX_CTL:
-		stats->rx_bgx_errs++;
+		this_cpu_inc(nic->drv_stats->rx_bgx_errs);
 		break;
 	case CQ_RX_ERROP_PREL2_ERR:
-		stats->rx_prel2_errs++;
+		this_cpu_inc(nic->drv_stats->rx_prel2_errs);
 		break;
 	case CQ_RX_ERROP_L2_MAL:
-		stats->rx_l2_hdr_malformed++;
+		this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
 		break;
 	case CQ_RX_ERROP_L2_OVERSIZE:
-		stats->rx_oversize++;
+		this_cpu_inc(nic->drv_stats->rx_oversize);
 		break;
 	case CQ_RX_ERROP_L2_UNDERSIZE:
-		stats->rx_undersize++;
+		this_cpu_inc(nic->drv_stats->rx_undersize);
 		break;
 	case CQ_RX_ERROP_L2_LENMISM:
-		stats->rx_l2_len_mismatch++;
+		this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
 		break;
 	case CQ_RX_ERROP_L2_PCLP:
-		stats->rx_l2_pclp++;
+		this_cpu_inc(nic->drv_stats->rx_l2_pclp);
 		break;
 	case CQ_RX_ERROP_IP_NOT:
-		stats->rx_ip_ver_errs++;
+		this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
 		break;
 	case CQ_RX_ERROP_IP_CSUM_ERR:
-		stats->rx_ip_csum_errs++;
+		this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
 		break;
 	case CQ_RX_ERROP_IP_MAL:
-		stats->rx_ip_hdr_malformed++;
+		this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
 		break;
 	case CQ_RX_ERROP_IP_MALD:
-		stats->rx_ip_payload_malformed++;
+		this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
 		break;
 	case CQ_RX_ERROP_IP_HOP:
-		stats->rx_ip_ttl_errs++;
+		this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
 		break;
 	case CQ_RX_ERROP_L3_PCLP:
-		stats->rx_l3_pclp++;
+		this_cpu_inc(nic->drv_stats->rx_l3_pclp);
 		break;
 	case CQ_RX_ERROP_L4_MAL:
-		stats->rx_l4_malformed++;
+		this_cpu_inc(nic->drv_stats->rx_l4_malformed);
 		break;
 	case CQ_RX_ERROP_L4_CHK:
-		stats->rx_l4_csum_errs++;
+		this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
 		break;
 	case CQ_RX_ERROP_UDP_LEN:
-		stats->rx_udp_len_errs++;
+		this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
 		break;
 	case CQ_RX_ERROP_L4_PORT:
-		stats->rx_l4_port_errs++;
+		this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
 		break;
 	case CQ_RX_ERROP_TCP_FLAG:
-		stats->rx_tcp_flag_errs++;
+		this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
 		break;
 	case CQ_RX_ERROP_TCP_OFFSET:
-		stats->rx_tcp_offset_errs++;
+		this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
 		break;
 	case CQ_RX_ERROP_L4_PCLP:
-		stats->rx_l4_pclp++;
+		this_cpu_inc(nic->drv_stats->rx_l4_pclp);
 		break;
 	case CQ_RX_ERROP_RBDR_TRUNC:
-		stats->rx_truncated_pkts++;
+		this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
 		break;
 	}
 
@@ -1512,53 +1529,52 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 }
 
 /* Check for errors in the send cmp.queue entry */
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
-			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
 {
-	struct cmp_queue_stats *stats = &cq->stats;
-
 	switch (cqe_tx->send_status) {
 	case CQ_TX_ERROP_GOOD:
-		stats->tx.good++;
 		return 0;
 	case CQ_TX_ERROP_DESC_FAULT:
-		stats->tx.desc_fault++;
+		this_cpu_inc(nic->drv_stats->tx_desc_fault);
 		break;
 	case CQ_TX_ERROP_HDR_CONS_ERR:
-		stats->tx.hdr_cons_err++;
+		this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
 		break;
 	case CQ_TX_ERROP_SUBDC_ERR:
-		stats->tx.subdesc_err++;
+		this_cpu_inc(nic->drv_stats->tx_subdesc_err);
+		break;
+	case CQ_TX_ERROP_MAX_SIZE_VIOL:
+		this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
 		break;
 	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
-		stats->tx.imm_size_oflow++;
+		this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
 		break;
 	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
-		stats->tx.data_seq_err++;
+		this_cpu_inc(nic->drv_stats->tx_data_seq_err);
 		break;
 	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
-		stats->tx.mem_seq_err++;
+		this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
 		break;
 	case CQ_TX_ERROP_LOCK_VIOL:
-		stats->tx.lock_viol++;
+		this_cpu_inc(nic->drv_stats->tx_lock_viol);
 		break;
 	case CQ_TX_ERROP_DATA_FAULT:
-		stats->tx.data_fault++;
+		this_cpu_inc(nic->drv_stats->tx_data_fault);
 		break;
 	case CQ_TX_ERROP_TSTMP_CONFLICT:
-		stats->tx.tstmp_conflict++;
+		this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
 		break;
 	case CQ_TX_ERROP_TSTMP_TIMEOUT:
-		stats->tx.tstmp_timeout++;
+		this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
 		break;
 	case CQ_TX_ERROP_MEM_FAULT:
-		stats->tx.mem_fault++;
+		this_cpu_inc(nic->drv_stats->tx_mem_fault);
 		break;
 	case CQ_TX_ERROP_CK_OVERLAP:
-		stats->tx.csum_overlap++;
+		this_cpu_inc(nic->drv_stats->tx_csum_overlap);
 		break;
 	case CQ_TX_ERROP_CK_OFLOW:
-		stats->tx.csum_overflow++;
+		this_cpu_inc(nic->drv_stats->tx_csum_overflow);
 		break;
 	}
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 869f3386028b..2e3c940c1093 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -158,6 +158,7 @@ enum CQ_TX_ERROP_E {
 	CQ_TX_ERROP_DESC_FAULT = 0x10,
 	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
 	CQ_TX_ERROP_SUBDC_ERR = 0x12,
+	CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
 	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
 	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
 	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
@@ -171,25 +172,6 @@ enum CQ_TX_ERROP_E {
 	CQ_TX_ERROP_ENUM_LAST = 0x8a,
 };
 
-struct cmp_queue_stats {
-	struct tx_stats {
-		u64 good;
-		u64 desc_fault;
-		u64 hdr_cons_err;
-		u64 subdesc_err;
-		u64 imm_size_oflow;
-		u64 data_seq_err;
-		u64 mem_seq_err;
-		u64 lock_viol;
-		u64 data_fault;
-		u64 tstmp_conflict;
-		u64 tstmp_timeout;
-		u64 mem_fault;
-		u64 csum_overlap;
-		u64 csum_overflow;
-	} tx;
-} ____cacheline_aligned_in_smp;
-
 enum RQ_SQ_STATS {
 	RQ_SQ_STATS_OCTS,
 	RQ_SQ_STATS_PKTS,
@@ -241,7 +223,6 @@ struct cmp_queue {
 	spinlock_t	lock;  /* lock to serialize processing CQEs */
 	void		*desc;
 	struct q_desc_mem	dmem;
-	struct cmp_queue_stats	stats;
 	int		irq;
 } ____cacheline_aligned_in_smp;
 
@@ -336,6 +317,5 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
-			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
 #endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 8bbaedbb7b94..050e21fbb147 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1242,8 +1242,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
 	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
-		bgx->bgx_id =
-			(pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+		bgx->bgx_id = (pci_resource_start(pdev,
+						  PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
 		bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
 		bgx->max_lmac = MAX_LMAC_PER_BGX;
 		bgx_vnic[bgx->bgx_id] = bgx;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index d59c71e4a000..01cc7c859131 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -28,6 +28,8 @@
 #define MAX_DMAC_PER_LMAC			8
 #define MAX_FRAME_SIZE				9216
 
+#define BGX_ID_MASK				0x3
+
 #define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE	2
 
 /* Registers */