Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3_enet.c')
 -rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 1611
 1 file changed, 996 insertions(+), 615 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 8c55965a66ac..20fcf0d1c2ce 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1,11 +1,5 @@
1/* 1// SPDX-License-Identifier: GPL-2.0+
2 * Copyright (c) 2016~2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9 3
10#include <linux/dma-mapping.h> 4#include <linux/dma-mapping.h>
11#include <linux/etherdevice.h> 5#include <linux/etherdevice.h>
@@ -15,6 +9,7 @@
15#include <linux/ipv6.h> 9#include <linux/ipv6.h>
16#include <linux/module.h> 10#include <linux/module.h>
17#include <linux/pci.h> 11#include <linux/pci.h>
12#include <linux/aer.h>
18#include <linux/skbuff.h> 13#include <linux/skbuff.h>
19#include <linux/sctp.h> 14#include <linux/sctp.h>
20#include <linux/vermagic.h> 15#include <linux/vermagic.h>
@@ -25,6 +20,10 @@
25#include "hnae3.h" 20#include "hnae3.h"
26#include "hns3_enet.h" 21#include "hns3_enet.h"
27 22
23static void hns3_clear_all_ring(struct hnae3_handle *h);
24static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
25static void hns3_remove_hw_addr(struct net_device *netdev);
26
28static const char hns3_driver_name[] = "hns3"; 27static const char hns3_driver_name[] = "hns3";
29const char hns3_driver_version[] = VERMAGIC_STRING; 28const char hns3_driver_version[] = VERMAGIC_STRING;
30static const char hns3_driver_string[] = 29static const char hns3_driver_string[] =
@@ -53,21 +52,39 @@ static const struct pci_device_id hns3_pci_tbl[] = {
53 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 52 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
54 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 53 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
55 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, 54 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
56 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, 55 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
56 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
57 /* required last entry */ 57 /* required last entry */
58 {0, } 58 {0, }
59}; 59};
60MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); 60MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
61 61
62static irqreturn_t hns3_irq_handle(int irq, void *dev) 62static irqreturn_t hns3_irq_handle(int irq, void *vector)
63{ 63{
64 struct hns3_enet_tqp_vector *tqp_vector = dev; 64 struct hns3_enet_tqp_vector *tqp_vector = vector;
65 65
66 napi_schedule(&tqp_vector->napi); 66 napi_schedule(&tqp_vector->napi);
67 67
68 return IRQ_HANDLED; 68 return IRQ_HANDLED;
69} 69}
70 70
71/* This callback function is used to set affinity changes to the irq affinity
72 * masks when the irq_set_affinity_notifier function is used.
73 */
74static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
75 const cpumask_t *mask)
76{
77 struct hns3_enet_tqp_vector *tqp_vectors =
78 container_of(notify, struct hns3_enet_tqp_vector,
79 affinity_notify);
80
81 tqp_vectors->affinity_mask = *mask;
82}
83
84static void hns3_nic_irq_affinity_release(struct kref *ref)
85{
86}
87
71static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) 88static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
72{ 89{
73 struct hns3_enet_tqp_vector *tqp_vectors; 90 struct hns3_enet_tqp_vector *tqp_vectors;
@@ -79,6 +96,10 @@ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
79 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) 96 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
80 continue; 97 continue;
81 98
99 /* clear the affinity notifier and affinity mask */
100 irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
101 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
102
82 /* release the irq resource */ 103 /* release the irq resource */
83 free_irq(tqp_vectors->vector_irq, tqp_vectors); 104 free_irq(tqp_vectors->vector_irq, tqp_vectors);
84 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; 105 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -129,6 +150,15 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
129 return ret; 150 return ret;
130 } 151 }
131 152
153 tqp_vectors->affinity_notify.notify =
154 hns3_nic_irq_affinity_notify;
155 tqp_vectors->affinity_notify.release =
156 hns3_nic_irq_affinity_release;
157 irq_set_affinity_notifier(tqp_vectors->vector_irq,
158 &tqp_vectors->affinity_notify);
159 irq_set_affinity_hint(tqp_vectors->vector_irq,
160 &tqp_vectors->affinity_mask);
161
132 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; 162 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
133 } 163 }
134 164
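
Note: the hunk above registers an affinity notifier and an affinity hint for each TQP vector's IRQ, so the driver is told when user space rewrites /proc/irq/*/smp_affinity. A minimal sketch of that pattern follows; struct demo_vector and the demo_* names are illustrative stand-ins for fields of hns3_enet_tqp_vector, not part of the patch.

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Illustrative stand-in for the fields the patch adds to
 * struct hns3_enet_tqp_vector.
 */
struct demo_vector {
        int irq;
        cpumask_t affinity_mask;
        struct irq_affinity_notify affinity_notify;
};

static void demo_affinity_notify(struct irq_affinity_notify *notify,
                                 const cpumask_t *mask)
{
        struct demo_vector *v =
                container_of(notify, struct demo_vector, affinity_notify);

        /* cache the new mask so later work can be steered accordingly */
        v->affinity_mask = *mask;
}

/* release() is mandatory so the notifier's kref can drop to zero */
static void demo_affinity_release(struct kref *ref)
{
}

static int demo_register_affinity(struct demo_vector *v)
{
        v->affinity_notify.notify = demo_affinity_notify;
        v->affinity_notify.release = demo_affinity_release;
        irq_set_affinity_notifier(v->irq, &v->affinity_notify);

        /* suggest an initial CPU placement to the IRQ core */
        return irq_set_affinity_hint(v->irq, &v->affinity_mask);
}
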
@@ -197,8 +227,6 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
197static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, 227static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
198 struct hns3_nic_priv *priv) 228 struct hns3_nic_priv *priv)
199{ 229{
200 struct hnae3_handle *h = priv->ae_handle;
201
202 /* initialize the configuration for interrupt coalescing. 230 /* initialize the configuration for interrupt coalescing.
203 * 1. GL (Interrupt Gap Limiter) 231 * 1. GL (Interrupt Gap Limiter)
204 * 2. RL (Interrupt Rate Limiter) 232 * 2. RL (Interrupt Rate Limiter)
@@ -211,9 +239,6 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
211 tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; 239 tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
212 tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; 240 tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
213 241
214 /* Default: disable RL */
215 h->kinfo.int_rl_setting = 0;
216
217 tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; 242 tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
218 tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; 243 tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
219 tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; 244 tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
@@ -236,7 +261,28 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
236 struct hnae3_handle *h = hns3_get_handle(netdev); 261 struct hnae3_handle *h = hns3_get_handle(netdev);
237 struct hnae3_knic_private_info *kinfo = &h->kinfo; 262 struct hnae3_knic_private_info *kinfo = &h->kinfo;
238 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; 263 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
239 int ret; 264 int i, ret;
265
266 if (kinfo->num_tc <= 1) {
267 netdev_reset_tc(netdev);
268 } else {
269 ret = netdev_set_num_tc(netdev, kinfo->num_tc);
270 if (ret) {
271 netdev_err(netdev,
272 "netdev_set_num_tc fail, ret=%d!\n", ret);
273 return ret;
274 }
275
276 for (i = 0; i < HNAE3_MAX_TC; i++) {
277 if (!kinfo->tc_info[i].enable)
278 continue;
279
280 netdev_set_tc_queue(netdev,
281 kinfo->tc_info[i].tc,
282 kinfo->tc_info[i].tqp_count,
283 kinfo->tc_info[i].tqp_offset);
284 }
285 }
240 286
241 ret = netif_set_real_num_tx_queues(netdev, queue_size); 287 ret = netif_set_real_num_tx_queues(netdev, queue_size);
242 if (ret) { 288 if (ret) {
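
Note: the new block in hns3_nic_set_real_num_queue() publishes the per-TC queue layout to the stack before the real TX/RX queue counts are set. A condensed sketch of that sequence, assuming simple tc_count[]/tc_offset[] arrays in place of kinfo->tc_info[]:

#include <linux/netdevice.h>

/* tc_count[]/tc_offset[] are hypothetical stand-ins for the per-TC
 * tqp_count/tqp_offset values kept in kinfo->tc_info[].
 */
static int demo_publish_tc_layout(struct net_device *netdev, u8 num_tc,
                                  const u16 *tc_count, const u16 *tc_offset)
{
        int i, ret;

        if (num_tc <= 1) {
                netdev_reset_tc(netdev);
                return 0;
        }

        ret = netdev_set_num_tc(netdev, num_tc);
        if (ret)
                return ret;

        /* each enabled TC owns a contiguous range of TX queues */
        for (i = 0; i < num_tc; i++)
                netdev_set_tc_queue(netdev, i, tc_count[i], tc_offset[i]);

        return 0;
}
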
@@ -258,12 +304,12 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
258 304
259static u16 hns3_get_max_available_channels(struct hnae3_handle *h) 305static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
260{ 306{
261 u16 free_tqps, max_rss_size, max_tqps; 307 u16 alloc_tqps, max_rss_size, rss_size;
262 308
263 h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); 309 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
264 max_tqps = h->kinfo.num_tc * max_rss_size; 310 rss_size = alloc_tqps / h->kinfo.num_tc;
265 311
266 return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); 312 return min_t(u16, rss_size, max_rss_size);
267} 313}
268 314
269static int hns3_nic_net_up(struct net_device *netdev) 315static int hns3_nic_net_up(struct net_device *netdev)
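
Note: hns3_get_max_available_channels() now derives the channel limit from the TQPs already allocated to the handle, divided across traffic classes and capped by the hardware RSS limit. A plain user-space illustration of the arithmetic (the numbers are made up):

#include <stdio.h>

static unsigned short demo_max_channels(unsigned short alloc_tqps,
                                        unsigned short num_tc,
                                        unsigned short max_rss_size)
{
        unsigned short rss_size = alloc_tqps / num_tc;

        return rss_size < max_rss_size ? rss_size : max_rss_size;
}

int main(void)
{
        /* 16 allocated TQPs shared by 4 TCs, RSS limit of 8 -> 4 channels */
        printf("%u\n", demo_max_channels(16, 4, 8));
        return 0;
}
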
@@ -273,6 +319,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
273 int i, j; 319 int i, j;
274 int ret; 320 int ret;
275 321
322 ret = hns3_nic_reset_all_ring(h);
323 if (ret)
324 return ret;
325
276 /* get irq resource for all vectors */ 326 /* get irq resource for all vectors */
277 ret = hns3_nic_init_irq(priv); 327 ret = hns3_nic_init_irq(priv);
278 if (ret) { 328 if (ret) {
@@ -305,7 +355,9 @@ out_start_err:
305static int hns3_nic_net_open(struct net_device *netdev) 355static int hns3_nic_net_open(struct net_device *netdev)
306{ 356{
307 struct hns3_nic_priv *priv = netdev_priv(netdev); 357 struct hns3_nic_priv *priv = netdev_priv(netdev);
308 int ret; 358 struct hnae3_handle *h = hns3_get_handle(netdev);
359 struct hnae3_knic_private_info *kinfo;
360 int i, ret;
309 361
310 netif_carrier_off(netdev); 362 netif_carrier_off(netdev);
311 363
@@ -320,6 +372,12 @@ static int hns3_nic_net_open(struct net_device *netdev)
320 return ret; 372 return ret;
321 } 373 }
322 374
375 kinfo = &h->kinfo;
376 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
377 netdev_set_prio_tc_map(netdev, i,
378 kinfo->prio_tc[i]);
379 }
380
323 priv->ae_handle->last_reset_time = jiffies; 381 priv->ae_handle->last_reset_time = jiffies;
324 return 0; 382 return 0;
325} 383}
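
Note: hns3_nic_net_open() now also programs the 802.1p priority to TC mapping before traffic starts flowing. The added loop boils down to the following, where prio_tc[] stands in for kinfo->prio_tc[] and 8 for HNAE3_MAX_USER_PRIO:

#include <linux/netdevice.h>

static void demo_map_prios(struct net_device *netdev, const u8 *prio_tc)
{
        int i;

        /* one entry per 802.1p priority (0..7) */
        for (i = 0; i < 8; i++)
                netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
}
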
@@ -333,17 +391,19 @@ static void hns3_nic_net_down(struct net_device *netdev)
333 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) 391 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
334 return; 392 return;
335 393
394 /* disable vectors */
395 for (i = 0; i < priv->vector_num; i++)
396 hns3_vector_disable(&priv->tqp_vector[i]);
397
336 /* stop ae_dev */ 398 /* stop ae_dev */
337 ops = priv->ae_handle->ae_algo->ops; 399 ops = priv->ae_handle->ae_algo->ops;
338 if (ops->stop) 400 if (ops->stop)
339 ops->stop(priv->ae_handle); 401 ops->stop(priv->ae_handle);
340 402
341 /* disable vectors */
342 for (i = 0; i < priv->vector_num; i++)
343 hns3_vector_disable(&priv->tqp_vector[i]);
344
345 /* free irq resources */ 403 /* free irq resources */
346 hns3_nic_uninit_irq(priv); 404 hns3_nic_uninit_irq(priv);
405
406 hns3_clear_all_ring(priv->ae_handle);
347} 407}
348 408
349static int hns3_nic_net_stop(struct net_device *netdev) 409static int hns3_nic_net_stop(struct net_device *netdev)
@@ -400,21 +460,84 @@ static int hns3_nic_mc_unsync(struct net_device *netdev,
400 return 0; 460 return 0;
401} 461}
402 462
463static u8 hns3_get_netdev_flags(struct net_device *netdev)
464{
465 u8 flags = 0;
466
467 if (netdev->flags & IFF_PROMISC) {
468 flags = HNAE3_USER_UPE | HNAE3_USER_MPE;
469 } else {
470 flags |= HNAE3_VLAN_FLTR;
471 if (netdev->flags & IFF_ALLMULTI)
472 flags |= HNAE3_USER_MPE;
473 }
474
475 return flags;
476}
477
403static void hns3_nic_set_rx_mode(struct net_device *netdev) 478static void hns3_nic_set_rx_mode(struct net_device *netdev)
404{ 479{
405 struct hnae3_handle *h = hns3_get_handle(netdev); 480 struct hnae3_handle *h = hns3_get_handle(netdev);
481 u8 new_flags;
482 int ret;
406 483
407 if (h->ae_algo->ops->set_promisc_mode) { 484 new_flags = hns3_get_netdev_flags(netdev);
408 if (netdev->flags & IFF_PROMISC) 485
409 h->ae_algo->ops->set_promisc_mode(h, 1); 486 ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
410 else 487 if (ret) {
411 h->ae_algo->ops->set_promisc_mode(h, 0);
412 }
413 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
414 netdev_err(netdev, "sync uc address fail\n"); 488 netdev_err(netdev, "sync uc address fail\n");
415 if (netdev->flags & IFF_MULTICAST) 489 if (ret == -ENOSPC)
416 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) 490 new_flags |= HNAE3_OVERFLOW_UPE;
491 }
492
493 if (netdev->flags & IFF_MULTICAST) {
494 ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
495 hns3_nic_mc_unsync);
496 if (ret) {
417 netdev_err(netdev, "sync mc address fail\n"); 497 netdev_err(netdev, "sync mc address fail\n");
498 if (ret == -ENOSPC)
499 new_flags |= HNAE3_OVERFLOW_MPE;
500 }
501 }
502
503 hns3_update_promisc_mode(netdev, new_flags);
504 /* User mode Promisc mode enable and vlan filtering is disabled to
505 * let all packets in. MAC-VLAN Table overflow Promisc enabled and
506 * vlan fitering is enabled
507 */
508 hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
509 h->netdev_flags = new_flags;
510}
511
512int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
513{
514 struct hns3_nic_priv *priv = netdev_priv(netdev);
515 struct hnae3_handle *h = priv->ae_handle;
516
517 if (h->ae_algo->ops->set_promisc_mode) {
518 return h->ae_algo->ops->set_promisc_mode(h,
519 promisc_flags & HNAE3_UPE,
520 promisc_flags & HNAE3_MPE);
521 }
522
523 return 0;
524}
525
526void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
527{
528 struct hns3_nic_priv *priv = netdev_priv(netdev);
529 struct hnae3_handle *h = priv->ae_handle;
530 bool last_state;
531
532 if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
533 last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
534 if (enable != last_state) {
535 netdev_info(netdev,
536 "%s vlan filter\n",
537 enable ? "enable" : "disable");
538 h->ae_algo->ops->enable_vlan_filter(h, enable);
539 }
540 }
418} 541}
419 542
420static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, 543static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
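
Note: the reworked hns3_nic_set_rx_mode() first folds netdev->flags into a set of hardware flags, then lets address-list overflow (-ENOSPC from __dev_uc_sync()/__dev_mc_sync()) escalate to overflow promiscuous mode, and finally keeps VLAN filtering on unless user-requested promiscuous mode is active. The flag derivation alone looks roughly like this; the DEMO_* bits are illustrative substitutes for the IFF_*/HNAE3_* constants:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_IFF_PROMISC        0x1
#define DEMO_IFF_ALLMULTI       0x2

#define DEMO_USER_UPE           0x01    /* user asked for unicast promisc */
#define DEMO_USER_MPE           0x02    /* user asked for multicast promisc */
#define DEMO_VLAN_FLTR          0x04    /* keep VLAN filtering enabled */

static uint8_t demo_get_netdev_flags(unsigned int netdev_flags)
{
        uint8_t flags = 0;

        if (netdev_flags & DEMO_IFF_PROMISC) {
                /* promiscuous mode lets everything in, VLAN filter goes off */
                flags = DEMO_USER_UPE | DEMO_USER_MPE;
        } else {
                flags |= DEMO_VLAN_FLTR;
                if (netdev_flags & DEMO_IFF_ALLMULTI)
                        flags |= DEMO_USER_MPE;
        }

        return flags;
}
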
@@ -478,8 +601,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
478 601
479 /* find the txbd field values */ 602 /* find the txbd field values */
480 *paylen = skb->len - hdr_len; 603 *paylen = skb->len - hdr_len;
481 hnae_set_bit(*type_cs_vlan_tso, 604 hnae3_set_bit(*type_cs_vlan_tso,
482 HNS3_TXD_TSO_B, 1); 605 HNS3_TXD_TSO_B, 1);
483 606
484 /* get MSS for TSO */ 607 /* get MSS for TSO */
485 *mss = skb_shinfo(skb)->gso_size; 608 *mss = skb_shinfo(skb)->gso_size;
@@ -502,7 +625,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
502 625
503 /* find outer header point */ 626 /* find outer header point */
504 l3.hdr = skb_network_header(skb); 627 l3.hdr = skb_network_header(skb);
505 l4_hdr = skb_inner_transport_header(skb); 628 l4_hdr = skb_transport_header(skb);
506 629
507 if (skb->protocol == htons(ETH_P_IPV6)) { 630 if (skb->protocol == htons(ETH_P_IPV6)) {
508 exthdr = l3.hdr + sizeof(*l3.v6); 631 exthdr = l3.hdr + sizeof(*l3.v6);
@@ -571,21 +694,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
571 694
572 /* compute L2 header size for normal packet, defined in 2 Bytes */ 695 /* compute L2 header size for normal packet, defined in 2 Bytes */
573 l2_len = l3.hdr - skb->data; 696 l2_len = l3.hdr - skb->data;
574 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, 697 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
575 HNS3_TXD_L2LEN_S, l2_len >> 1); 698 HNS3_TXD_L2LEN_S, l2_len >> 1);
576 699
577 /* tunnel packet*/ 700 /* tunnel packet*/
578 if (skb->encapsulation) { 701 if (skb->encapsulation) {
579 /* compute OL2 header size, defined in 2 Bytes */ 702 /* compute OL2 header size, defined in 2 Bytes */
580 ol2_len = l2_len; 703 ol2_len = l2_len;
581 hnae_set_field(*ol_type_vlan_len_msec, 704 hnae3_set_field(*ol_type_vlan_len_msec,
582 HNS3_TXD_L2LEN_M, 705 HNS3_TXD_L2LEN_M,
583 HNS3_TXD_L2LEN_S, ol2_len >> 1); 706 HNS3_TXD_L2LEN_S, ol2_len >> 1);
584 707
585 /* compute OL3 header size, defined in 4 Bytes */ 708 /* compute OL3 header size, defined in 4 Bytes */
586 ol3_len = l4.hdr - l3.hdr; 709 ol3_len = l4.hdr - l3.hdr;
587 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, 710 hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
588 HNS3_TXD_L3LEN_S, ol3_len >> 2); 711 HNS3_TXD_L3LEN_S, ol3_len >> 2);
589 712
590 /* MAC in UDP, MAC in GRE (0x6558)*/ 713 /* MAC in UDP, MAC in GRE (0x6558)*/
591 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { 714 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
@@ -594,16 +717,17 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
594 717
595 /* compute OL4 header size, defined in 4 Bytes. */ 718 /* compute OL4 header size, defined in 4 Bytes. */
596 ol4_len = l2_hdr - l4.hdr; 719 ol4_len = l2_hdr - l4.hdr;
597 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M, 720 hnae3_set_field(*ol_type_vlan_len_msec,
598 HNS3_TXD_L4LEN_S, ol4_len >> 2); 721 HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
722 ol4_len >> 2);
599 723
600 /* switch IP header ptr from outer to inner header */ 724 /* switch IP header ptr from outer to inner header */
601 l3.hdr = skb_inner_network_header(skb); 725 l3.hdr = skb_inner_network_header(skb);
602 726
603 /* compute inner l2 header size, defined in 2 Bytes. */ 727 /* compute inner l2 header size, defined in 2 Bytes. */
604 l2_len = l3.hdr - l2_hdr; 728 l2_len = l3.hdr - l2_hdr;
605 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, 729 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
606 HNS3_TXD_L2LEN_S, l2_len >> 1); 730 HNS3_TXD_L2LEN_S, l2_len >> 1);
607 } else { 731 } else {
608 /* skb packet types not supported by hardware, 732 /* skb packet types not supported by hardware,
609 * txbd len fild doesn't be filled. 733 * txbd len fild doesn't be filled.
@@ -619,22 +743,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
619 743
620 /* compute inner(/normal) L3 header size, defined in 4 Bytes */ 744 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
621 l3_len = l4.hdr - l3.hdr; 745 l3_len = l4.hdr - l3.hdr;
622 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, 746 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
623 HNS3_TXD_L3LEN_S, l3_len >> 2); 747 HNS3_TXD_L3LEN_S, l3_len >> 2);
624 748
625 /* compute inner(/normal) L4 header size, defined in 4 Bytes */ 749 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
626 switch (l4_proto) { 750 switch (l4_proto) {
627 case IPPROTO_TCP: 751 case IPPROTO_TCP:
628 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, 752 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
629 HNS3_TXD_L4LEN_S, l4.tcp->doff); 753 HNS3_TXD_L4LEN_S, l4.tcp->doff);
630 break; 754 break;
631 case IPPROTO_SCTP: 755 case IPPROTO_SCTP:
632 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, 756 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
633 HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2)); 757 HNS3_TXD_L4LEN_S,
758 (sizeof(struct sctphdr) >> 2));
634 break; 759 break;
635 case IPPROTO_UDP: 760 case IPPROTO_UDP:
636 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, 761 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
637 HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2)); 762 HNS3_TXD_L4LEN_S,
763 (sizeof(struct udphdr) >> 2));
638 break; 764 break;
639 default: 765 default:
640 /* skb packet types not supported by hardware, 766 /* skb packet types not supported by hardware,
@@ -644,6 +770,32 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
644 } 770 }
645} 771}
646 772
773/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
774 * and it is udp packet, which has a dest port as the IANA assigned.
775 * the hardware is expected to do the checksum offload, but the
776 * hardware will not do the checksum offload when udp dest port is
777 * 4789.
778 */
779static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
780{
781#define IANA_VXLAN_PORT 4789
782 union {
783 struct tcphdr *tcp;
784 struct udphdr *udp;
785 struct gre_base_hdr *gre;
786 unsigned char *hdr;
787 } l4;
788
789 l4.hdr = skb_transport_header(skb);
790
791 if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
792 return false;
793
794 skb_checksum_help(skb);
795
796 return true;
797}
798
647static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, 799static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
648 u8 il4_proto, u32 *type_cs_vlan_tso, 800 u8 il4_proto, u32 *type_cs_vlan_tso,
649 u32 *ol_type_vlan_len_msec) 801 u32 *ol_type_vlan_len_msec)
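
Note: hns3_tunnel_csum_bug() works around hardware that refuses to offload the L4 checksum of a plain (non-encapsulated) UDP packet whose destination port is the IANA VXLAN port, falling back to skb_checksum_help(). The decision itself reduces to a port test, sketched here in user-space C:

#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IANA_VXLAN_PORT 4789

/* Return true when the checksum must be computed in software: the
 * packet is not encapsulated, yet its UDP destination port is the
 * VXLAN port the hardware will not offload.
 */
static bool demo_needs_sw_csum(bool encapsulated, uint16_t udp_dest_be)
{
        return !encapsulated && udp_dest_be == htons(IANA_VXLAN_PORT);
}
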
@@ -662,32 +814,34 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
662 /* define outer network header type.*/ 814 /* define outer network header type.*/
663 if (skb->protocol == htons(ETH_P_IP)) { 815 if (skb->protocol == htons(ETH_P_IP)) {
664 if (skb_is_gso(skb)) 816 if (skb_is_gso(skb))
665 hnae_set_field(*ol_type_vlan_len_msec, 817 hnae3_set_field(*ol_type_vlan_len_msec,
666 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, 818 HNS3_TXD_OL3T_M,
667 HNS3_OL3T_IPV4_CSUM); 819 HNS3_TXD_OL3T_S,
820 HNS3_OL3T_IPV4_CSUM);
668 else 821 else
669 hnae_set_field(*ol_type_vlan_len_msec, 822 hnae3_set_field(*ol_type_vlan_len_msec,
670 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, 823 HNS3_TXD_OL3T_M,
671 HNS3_OL3T_IPV4_NO_CSUM); 824 HNS3_TXD_OL3T_S,
825 HNS3_OL3T_IPV4_NO_CSUM);
672 826
673 } else if (skb->protocol == htons(ETH_P_IPV6)) { 827 } else if (skb->protocol == htons(ETH_P_IPV6)) {
674 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, 828 hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
675 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); 829 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
676 } 830 }
677 831
678 /* define tunnel type(OL4).*/ 832 /* define tunnel type(OL4).*/
679 switch (l4_proto) { 833 switch (l4_proto) {
680 case IPPROTO_UDP: 834 case IPPROTO_UDP:
681 hnae_set_field(*ol_type_vlan_len_msec, 835 hnae3_set_field(*ol_type_vlan_len_msec,
682 HNS3_TXD_TUNTYPE_M, 836 HNS3_TXD_TUNTYPE_M,
683 HNS3_TXD_TUNTYPE_S, 837 HNS3_TXD_TUNTYPE_S,
684 HNS3_TUN_MAC_IN_UDP); 838 HNS3_TUN_MAC_IN_UDP);
685 break; 839 break;
686 case IPPROTO_GRE: 840 case IPPROTO_GRE:
687 hnae_set_field(*ol_type_vlan_len_msec, 841 hnae3_set_field(*ol_type_vlan_len_msec,
688 HNS3_TXD_TUNTYPE_M, 842 HNS3_TXD_TUNTYPE_M,
689 HNS3_TXD_TUNTYPE_S, 843 HNS3_TXD_TUNTYPE_S,
690 HNS3_TUN_NVGRE); 844 HNS3_TUN_NVGRE);
691 break; 845 break;
692 default: 846 default:
693 /* drop the skb tunnel packet if hardware don't support, 847 /* drop the skb tunnel packet if hardware don't support,
@@ -708,40 +862,43 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
708 } 862 }
709 863
710 if (l3.v4->version == 4) { 864 if (l3.v4->version == 4) {
711 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, 865 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
712 HNS3_TXD_L3T_S, HNS3_L3T_IPV4); 866 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
713 867
714 /* the stack computes the IP header already, the only time we 868 /* the stack computes the IP header already, the only time we
715 * need the hardware to recompute it is in the case of TSO. 869 * need the hardware to recompute it is in the case of TSO.
716 */ 870 */
717 if (skb_is_gso(skb)) 871 if (skb_is_gso(skb))
718 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); 872 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
719
720 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
721 } else if (l3.v6->version == 6) { 873 } else if (l3.v6->version == 6) {
722 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, 874 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
723 HNS3_TXD_L3T_S, HNS3_L3T_IPV6); 875 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
724 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
725 } 876 }
726 877
727 switch (l4_proto) { 878 switch (l4_proto) {
728 case IPPROTO_TCP: 879 case IPPROTO_TCP:
729 hnae_set_field(*type_cs_vlan_tso, 880 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
730 HNS3_TXD_L4T_M, 881 hnae3_set_field(*type_cs_vlan_tso,
731 HNS3_TXD_L4T_S, 882 HNS3_TXD_L4T_M,
732 HNS3_L4T_TCP); 883 HNS3_TXD_L4T_S,
884 HNS3_L4T_TCP);
733 break; 885 break;
734 case IPPROTO_UDP: 886 case IPPROTO_UDP:
735 hnae_set_field(*type_cs_vlan_tso, 887 if (hns3_tunnel_csum_bug(skb))
736 HNS3_TXD_L4T_M, 888 break;
737 HNS3_TXD_L4T_S, 889
738 HNS3_L4T_UDP); 890 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
891 hnae3_set_field(*type_cs_vlan_tso,
892 HNS3_TXD_L4T_M,
893 HNS3_TXD_L4T_S,
894 HNS3_L4T_UDP);
739 break; 895 break;
740 case IPPROTO_SCTP: 896 case IPPROTO_SCTP:
741 hnae_set_field(*type_cs_vlan_tso, 897 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
742 HNS3_TXD_L4T_M, 898 hnae3_set_field(*type_cs_vlan_tso,
743 HNS3_TXD_L4T_S, 899 HNS3_TXD_L4T_M,
744 HNS3_L4T_SCTP); 900 HNS3_TXD_L4T_S,
901 HNS3_L4T_SCTP);
745 break; 902 break;
746 default: 903 default:
747 /* drop the skb tunnel packet if hardware don't support, 904 /* drop the skb tunnel packet if hardware don't support,
@@ -763,11 +920,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
763static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) 920static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
764{ 921{
765 /* Config bd buffer end */ 922 /* Config bd buffer end */
766 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, 923 hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
767 HNS3_TXD_BDTYPE_S, 0); 924 HNS3_TXD_BDTYPE_S, 0);
768 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); 925 hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
769 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); 926 hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
770 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); 927 hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
771} 928}
772 929
773static int hns3_fill_desc_vtags(struct sk_buff *skb, 930static int hns3_fill_desc_vtags(struct sk_buff *skb,
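
Note: much of this patch is a mechanical rename from the hnae_set_bit()/hnae_set_field() helpers to their hnae3_* equivalents. Their behaviour is a simple read-modify-write on a descriptor word, roughly as below; the demo_* macros are approximations for illustration, not the hnae3.h definitions:

#include <stdint.h>
#include <stdio.h>

#define demo_set_field(origin, mask, shift, val)                \
        do {                                                    \
                (origin) &= ~(mask);                            \
                (origin) |= ((val) << (shift)) & (mask);        \
        } while (0)

#define demo_set_bit(origin, shift, val) \
        demo_set_field(origin, 0x1U << (shift), shift, val)

int main(void)
{
        uint32_t bd_info = 0;

        demo_set_field(bd_info, 0xF0U, 4, 0x5); /* 4-bit field at bit 4 */
        demo_set_bit(bd_info, 0, 1);            /* e.g. a "valid" bit */
        printf("0x%x\n", bd_info);              /* prints 0x51 */
        return 0;
}
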
@@ -800,10 +957,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
800 * and use inner_vtag in one tag case. 957 * and use inner_vtag in one tag case.
801 */ 958 */
802 if (skb->protocol == htons(ETH_P_8021Q)) { 959 if (skb->protocol == htons(ETH_P_8021Q)) {
803 hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); 960 hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
804 *out_vtag = vlan_tag; 961 *out_vtag = vlan_tag;
805 } else { 962 } else {
806 hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); 963 hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
807 *inner_vtag = vlan_tag; 964 *inner_vtag = vlan_tag;
808 } 965 }
809 } else if (skb->protocol == htons(ETH_P_8021Q)) { 966 } else if (skb->protocol == htons(ETH_P_8021Q)) {
@@ -823,36 +980,28 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
823} 980}
824 981
825static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, 982static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
826 int size, dma_addr_t dma, int frag_end, 983 int size, int frag_end, enum hns_desc_type type)
827 enum hns_desc_type type)
828{ 984{
829 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 985 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
830 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; 986 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
987 struct device *dev = ring_to_dev(ring);
831 u32 ol_type_vlan_len_msec = 0; 988 u32 ol_type_vlan_len_msec = 0;
832 u16 bdtp_fe_sc_vld_ra_ri = 0; 989 u16 bdtp_fe_sc_vld_ra_ri = 0;
990 struct skb_frag_struct *frag;
991 unsigned int frag_buf_num;
833 u32 type_cs_vlan_tso = 0; 992 u32 type_cs_vlan_tso = 0;
834 struct sk_buff *skb; 993 struct sk_buff *skb;
835 u16 inner_vtag = 0; 994 u16 inner_vtag = 0;
836 u16 out_vtag = 0; 995 u16 out_vtag = 0;
996 unsigned int k;
997 int sizeoflast;
837 u32 paylen = 0; 998 u32 paylen = 0;
999 dma_addr_t dma;
838 u16 mss = 0; 1000 u16 mss = 0;
839 __be16 protocol;
840 u8 ol4_proto; 1001 u8 ol4_proto;
841 u8 il4_proto; 1002 u8 il4_proto;
842 int ret; 1003 int ret;
843 1004
844 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
845 desc_cb->priv = priv;
846 desc_cb->length = size;
847 desc_cb->dma = dma;
848 desc_cb->type = type;
849
850 /* now, fill the descriptor */
851 desc->addr = cpu_to_le64(dma);
852 desc->tx.send_size = cpu_to_le16((u16)size);
853 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
854 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
855
856 if (type == DESC_TYPE_SKB) { 1005 if (type == DESC_TYPE_SKB) {
857 skb = (struct sk_buff *)priv; 1006 skb = (struct sk_buff *)priv;
858 paylen = skb->len; 1007 paylen = skb->len;
@@ -865,7 +1014,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
865 1014
866 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1015 if (skb->ip_summed == CHECKSUM_PARTIAL) {
867 skb_reset_mac_len(skb); 1016 skb_reset_mac_len(skb);
868 protocol = skb->protocol;
869 1017
870 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); 1018 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
871 if (ret) 1019 if (ret)
@@ -894,38 +1042,47 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
894 desc->tx.mss = cpu_to_le16(mss); 1042 desc->tx.mss = cpu_to_le16(mss);
895 desc->tx.vlan_tag = cpu_to_le16(inner_vtag); 1043 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
896 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); 1044 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
897 }
898 1045
899 /* move ring pointer to next.*/ 1046 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
900 ring_ptr_move_fw(ring, next_to_use); 1047 } else {
1048 frag = (struct skb_frag_struct *)priv;
1049 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1050 }
901 1051
902 return 0; 1052 if (dma_mapping_error(ring->dev, dma)) {
903} 1053 ring->stats.sw_err_cnt++;
1054 return -ENOMEM;
1055 }
904 1056
905static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, 1057 desc_cb->length = size;
906 int size, dma_addr_t dma, int frag_end,
907 enum hns_desc_type type)
908{
909 unsigned int frag_buf_num;
910 unsigned int k;
911 int sizeoflast;
912 int ret;
913 1058
914 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; 1059 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
915 sizeoflast = size % HNS3_MAX_BD_SIZE; 1060 sizeoflast = size % HNS3_MAX_BD_SIZE;
916 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; 1061 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
917 1062
918 /* When the frag size is bigger than hardware, split this frag */ 1063 /* When frag size is bigger than hardware limit, split this frag */
919 for (k = 0; k < frag_buf_num; k++) { 1064 for (k = 0; k < frag_buf_num; k++) {
920 ret = hns3_fill_desc(ring, priv, 1065 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
921 (k == frag_buf_num - 1) ? 1066 desc_cb->priv = priv;
922 sizeoflast : HNS3_MAX_BD_SIZE, 1067 desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
923 dma + HNS3_MAX_BD_SIZE * k, 1068 desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
924 frag_end && (k == frag_buf_num - 1) ? 1 : 0, 1069 DESC_TYPE_SKB : DESC_TYPE_PAGE;
925 (type == DESC_TYPE_SKB && !k) ? 1070
926 DESC_TYPE_SKB : DESC_TYPE_PAGE); 1071 /* now, fill the descriptor */
927 if (ret) 1072 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
928 return ret; 1073 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1074 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
1075 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
1076 frag_end && (k == frag_buf_num - 1) ?
1077 1 : 0);
1078 desc->tx.bdtp_fe_sc_vld_ra_ri =
1079 cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1080
1081 /* move ring pointer to next.*/
1082 ring_ptr_move_fw(ring, next_to_use);
1083
1084 desc_cb = &ring->desc_cb[ring->next_to_use];
1085 desc = &ring->desc[ring->next_to_use];
929 } 1086 }
930 1087
931 return 0; 1088 return 0;
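
Note: hns3_fill_desc_tso() is gone; hns3_fill_desc() now maps the buffer itself and splits anything larger than one hardware BD. The splitting arithmetic (frag_buf_num and sizeoflast) behaves like this stand-alone example, where the 65535-byte limit is an assumed value standing in for HNS3_MAX_BD_SIZE:

#include <stdio.h>

#define DEMO_MAX_BD_SIZE 65535U /* assumed BD size limit */

static void demo_split(unsigned int size)
{
        unsigned int frag_buf_num, sizeoflast, k;

        frag_buf_num = (size + DEMO_MAX_BD_SIZE - 1) / DEMO_MAX_BD_SIZE;
        sizeoflast = size % DEMO_MAX_BD_SIZE;
        sizeoflast = sizeoflast ? sizeoflast : DEMO_MAX_BD_SIZE;

        for (k = 0; k < frag_buf_num; k++)
                printf("BD %u: %u bytes\n", k,
                       k == frag_buf_num - 1 ? sizeoflast : DEMO_MAX_BD_SIZE);
}

int main(void)
{
        demo_split(100000);     /* -> BD 0: 65535 bytes, BD 1: 34465 bytes */
        return 0;
}
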
@@ -973,7 +1130,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
973 /* No. of segments (plus a header) */ 1130 /* No. of segments (plus a header) */
974 buf_num = skb_shinfo(skb)->nr_frags + 1; 1131 buf_num = skb_shinfo(skb)->nr_frags + 1;
975 1132
976 if (buf_num > ring_space(ring)) 1133 if (unlikely(ring_space(ring) < buf_num))
977 return -EBUSY; 1134 return -EBUSY;
978 1135
979 *bnum = buf_num; 1136 *bnum = buf_num;
@@ -981,7 +1138,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
981 return 0; 1138 return 0;
982} 1139}
983 1140
984static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) 1141static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
985{ 1142{
986 struct device *dev = ring_to_dev(ring); 1143 struct device *dev = ring_to_dev(ring);
987 unsigned int i; 1144 unsigned int i;
@@ -997,12 +1154,14 @@ static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
997 ring->desc_cb[ring->next_to_use].dma, 1154 ring->desc_cb[ring->next_to_use].dma,
998 ring->desc_cb[ring->next_to_use].length, 1155 ring->desc_cb[ring->next_to_use].length,
999 DMA_TO_DEVICE); 1156 DMA_TO_DEVICE);
1000 else 1157 else if (ring->desc_cb[ring->next_to_use].length)
1001 dma_unmap_page(dev, 1158 dma_unmap_page(dev,
1002 ring->desc_cb[ring->next_to_use].dma, 1159 ring->desc_cb[ring->next_to_use].dma,
1003 ring->desc_cb[ring->next_to_use].length, 1160 ring->desc_cb[ring->next_to_use].length,
1004 DMA_TO_DEVICE); 1161 DMA_TO_DEVICE);
1005 1162
1163 ring->desc_cb[ring->next_to_use].length = 0;
1164
1006 /* rollback one */ 1165 /* rollback one */
1007 ring_ptr_move_bw(ring, next_to_use); 1166 ring_ptr_move_bw(ring, next_to_use);
1008 } 1167 }
@@ -1014,12 +1173,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1014 struct hns3_nic_ring_data *ring_data = 1173 struct hns3_nic_ring_data *ring_data =
1015 &tx_ring_data(priv, skb->queue_mapping); 1174 &tx_ring_data(priv, skb->queue_mapping);
1016 struct hns3_enet_ring *ring = ring_data->ring; 1175 struct hns3_enet_ring *ring = ring_data->ring;
1017 struct device *dev = priv->dev;
1018 struct netdev_queue *dev_queue; 1176 struct netdev_queue *dev_queue;
1019 struct skb_frag_struct *frag; 1177 struct skb_frag_struct *frag;
1020 int next_to_use_head; 1178 int next_to_use_head;
1021 int next_to_use_frag; 1179 int next_to_use_frag;
1022 dma_addr_t dma;
1023 int buf_num; 1180 int buf_num;
1024 int seg_num; 1181 int seg_num;
1025 int size; 1182 int size;
@@ -1054,35 +1211,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1054 1211
1055 next_to_use_head = ring->next_to_use; 1212 next_to_use_head = ring->next_to_use;
1056 1213
1057 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); 1214 ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
1058 if (dma_mapping_error(dev, dma)) { 1215 DESC_TYPE_SKB);
1059 netdev_err(netdev, "TX head DMA map failed\n");
1060 ring->stats.sw_err_cnt++;
1061 goto out_err_tx_ok;
1062 }
1063
1064 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
1065 DESC_TYPE_SKB);
1066 if (ret) 1216 if (ret)
1067 goto head_dma_map_err; 1217 goto head_fill_err;
1068 1218
1069 next_to_use_frag = ring->next_to_use; 1219 next_to_use_frag = ring->next_to_use;
1070 /* Fill the fragments */ 1220 /* Fill the fragments */
1071 for (i = 1; i < seg_num; i++) { 1221 for (i = 1; i < seg_num; i++) {
1072 frag = &skb_shinfo(skb)->frags[i - 1]; 1222 frag = &skb_shinfo(skb)->frags[i - 1];
1073 size = skb_frag_size(frag); 1223 size = skb_frag_size(frag);
1074 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); 1224
1075 if (dma_mapping_error(dev, dma)) { 1225 ret = priv->ops.fill_desc(ring, frag, size,
1076 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); 1226 seg_num - 1 == i ? 1 : 0,
1077 ring->stats.sw_err_cnt++; 1227 DESC_TYPE_PAGE);
1078 goto frag_dma_map_err;
1079 }
1080 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
1081 seg_num - 1 == i ? 1 : 0,
1082 DESC_TYPE_PAGE);
1083 1228
1084 if (ret) 1229 if (ret)
1085 goto frag_dma_map_err; 1230 goto frag_fill_err;
1086 } 1231 }
1087 1232
1088 /* Complete translate all packets */ 1233 /* Complete translate all packets */
@@ -1091,15 +1236,15 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1091 1236
1092 wmb(); /* Commit all data before submit */ 1237 wmb(); /* Commit all data before submit */
1093 1238
1094 hnae_queue_xmit(ring->tqp, buf_num); 1239 hnae3_queue_xmit(ring->tqp, buf_num);
1095 1240
1096 return NETDEV_TX_OK; 1241 return NETDEV_TX_OK;
1097 1242
1098frag_dma_map_err: 1243frag_fill_err:
1099 hns_nic_dma_unmap(ring, next_to_use_frag); 1244 hns3_clear_desc(ring, next_to_use_frag);
1100 1245
1101head_dma_map_err: 1246head_fill_err:
1102 hns_nic_dma_unmap(ring, next_to_use_head); 1247 hns3_clear_desc(ring, next_to_use_head);
1103 1248
1104out_err_tx_ok: 1249out_err_tx_ok:
1105 dev_kfree_skb_any(skb); 1250 dev_kfree_skb_any(skb);
@@ -1121,6 +1266,12 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1121 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) 1266 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1122 return -EADDRNOTAVAIL; 1267 return -EADDRNOTAVAIL;
1123 1268
1269 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1270 netdev_info(netdev, "already using mac address %pM\n",
1271 mac_addr->sa_data);
1272 return 0;
1273 }
1274
1124 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); 1275 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1125 if (ret) { 1276 if (ret) {
1126 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); 1277 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
@@ -1132,6 +1283,20 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1132 return 0; 1283 return 0;
1133} 1284}
1134 1285
1286static int hns3_nic_do_ioctl(struct net_device *netdev,
1287 struct ifreq *ifr, int cmd)
1288{
1289 struct hnae3_handle *h = hns3_get_handle(netdev);
1290
1291 if (!netif_running(netdev))
1292 return -EINVAL;
1293
1294 if (!h->ae_algo->ops->do_ioctl)
1295 return -EOPNOTSUPP;
1296
1297 return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
1298}
1299
1135static int hns3_nic_set_features(struct net_device *netdev, 1300static int hns3_nic_set_features(struct net_device *netdev,
1136 netdev_features_t features) 1301 netdev_features_t features)
1137{ 1302{
@@ -1141,13 +1306,10 @@ static int hns3_nic_set_features(struct net_device *netdev,
1141 int ret; 1306 int ret;
1142 1307
1143 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { 1308 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1144 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { 1309 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
1145 priv->ops.fill_desc = hns3_fill_desc_tso;
1146 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; 1310 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1147 } else { 1311 else
1148 priv->ops.fill_desc = hns3_fill_desc;
1149 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; 1312 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1150 }
1151 } 1313 }
1152 1314
1153 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && 1315 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
@@ -1169,6 +1331,13 @@ static int hns3_nic_set_features(struct net_device *netdev,
1169 return ret; 1331 return ret;
1170 } 1332 }
1171 1333
1334 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
1335 if (features & NETIF_F_NTUPLE)
1336 h->ae_algo->ops->enable_fd(h, true);
1337 else
1338 h->ae_algo->ops->enable_fd(h, false);
1339 }
1340
1172 netdev->features = features; 1341 netdev->features = features;
1173 return 0; 1342 return 0;
1174} 1343}
@@ -1244,93 +1413,6 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
1244 stats->tx_compressed = netdev->stats.tx_compressed; 1413 stats->tx_compressed = netdev->stats.tx_compressed;
1245} 1414}
1246 1415
1247static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1248 enum hns3_udp_tnl_type type)
1249{
1250 struct hns3_nic_priv *priv = netdev_priv(netdev);
1251 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1252 struct hnae3_handle *h = priv->ae_handle;
1253
1254 if (udp_tnl->used && udp_tnl->dst_port == port) {
1255 udp_tnl->used++;
1256 return;
1257 }
1258
1259 if (udp_tnl->used) {
1260 netdev_warn(netdev,
1261 "UDP tunnel [%d], port [%d] offload\n", type, port);
1262 return;
1263 }
1264
1265 udp_tnl->dst_port = port;
1266 udp_tnl->used = 1;
1267 /* TBD send command to hardware to add port */
1268 if (h->ae_algo->ops->add_tunnel_udp)
1269 h->ae_algo->ops->add_tunnel_udp(h, port);
1270}
1271
1272static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1273 enum hns3_udp_tnl_type type)
1274{
1275 struct hns3_nic_priv *priv = netdev_priv(netdev);
1276 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1277 struct hnae3_handle *h = priv->ae_handle;
1278
1279 if (!udp_tnl->used || udp_tnl->dst_port != port) {
1280 netdev_warn(netdev,
1281 "Invalid UDP tunnel port %d\n", port);
1282 return;
1283 }
1284
1285 udp_tnl->used--;
1286 if (udp_tnl->used)
1287 return;
1288
1289 udp_tnl->dst_port = 0;
1290 /* TBD send command to hardware to del port */
1291 if (h->ae_algo->ops->del_tunnel_udp)
1292 h->ae_algo->ops->del_tunnel_udp(h, port);
1293}
1294
1295/* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports
1296 * @netdev: This physical ports's netdev
1297 * @ti: Tunnel information
1298 */
1299static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1300 struct udp_tunnel_info *ti)
1301{
1302 u16 port_n = ntohs(ti->port);
1303
1304 switch (ti->type) {
1305 case UDP_TUNNEL_TYPE_VXLAN:
1306 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1307 break;
1308 case UDP_TUNNEL_TYPE_GENEVE:
1309 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1310 break;
1311 default:
1312 netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1313 break;
1314 }
1315}
1316
1317static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1318 struct udp_tunnel_info *ti)
1319{
1320 u16 port_n = ntohs(ti->port);
1321
1322 switch (ti->type) {
1323 case UDP_TUNNEL_TYPE_VXLAN:
1324 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1325 break;
1326 case UDP_TUNNEL_TYPE_GENEVE:
1327 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1328 break;
1329 default:
1330 break;
1331 }
1332}
1333
1334static int hns3_setup_tc(struct net_device *netdev, void *type_data) 1416static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1335{ 1417{
1336 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; 1418 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
@@ -1341,7 +1423,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1341 u16 mode = mqprio_qopt->mode; 1423 u16 mode = mqprio_qopt->mode;
1342 u8 hw = mqprio_qopt->qopt.hw; 1424 u8 hw = mqprio_qopt->qopt.hw;
1343 bool if_running; 1425 bool if_running;
1344 unsigned int i;
1345 int ret; 1426 int ret;
1346 1427
1347 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && 1428 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
@@ -1365,24 +1446,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1365 if (ret) 1446 if (ret)
1366 goto out; 1447 goto out;
1367 1448
1368 if (tc <= 1) {
1369 netdev_reset_tc(netdev);
1370 } else {
1371 ret = netdev_set_num_tc(netdev, tc);
1372 if (ret)
1373 goto out;
1374
1375 for (i = 0; i < HNAE3_MAX_TC; i++) {
1376 if (!kinfo->tc_info[i].enable)
1377 continue;
1378
1379 netdev_set_tc_queue(netdev,
1380 kinfo->tc_info[i].tc,
1381 kinfo->tc_info[i].tqp_count,
1382 kinfo->tc_info[i].tqp_offset);
1383 }
1384 }
1385
1386 ret = hns3_nic_set_real_num_queue(netdev); 1449 ret = hns3_nic_set_real_num_queue(netdev);
1387 1450
1388out: 1451out:
@@ -1433,18 +1496,22 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1433 return ret; 1496 return ret;
1434} 1497}
1435 1498
1436static void hns3_restore_vlan(struct net_device *netdev) 1499static int hns3_restore_vlan(struct net_device *netdev)
1437{ 1500{
1438 struct hns3_nic_priv *priv = netdev_priv(netdev); 1501 struct hns3_nic_priv *priv = netdev_priv(netdev);
1502 int ret = 0;
1439 u16 vid; 1503 u16 vid;
1440 int ret;
1441 1504
1442 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { 1505 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
1443 ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); 1506 ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
1444 if (ret) 1507 if (ret) {
1445 netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n", 1508 netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
1446 vid, ret); 1509 vid, ret);
1510 return ret;
1511 }
1447 } 1512 }
1513
1514 return ret;
1448} 1515}
1449 1516
1450static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 1517static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
@@ -1476,13 +1543,11 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1476 } 1543 }
1477 1544
1478 ret = h->ae_algo->ops->set_mtu(h, new_mtu); 1545 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1479 if (ret) { 1546 if (ret)
1480 netdev_err(netdev, "failed to change MTU in hardware %d\n", 1547 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1481 ret); 1548 ret);
1482 return ret; 1549 else
1483 } 1550 netdev->mtu = new_mtu;
1484
1485 netdev->mtu = new_mtu;
1486 1551
1487 /* if the netdev was running earlier, bring it up again */ 1552 /* if the netdev was running earlier, bring it up again */
1488 if (if_running && hns3_nic_net_open(netdev)) 1553 if (if_running && hns3_nic_net_open(netdev))
@@ -1555,7 +1620,7 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
1555 1620
1556 /* request the reset */ 1621 /* request the reset */
1557 if (h->ae_algo->ops->reset_event) 1622 if (h->ae_algo->ops->reset_event)
1558 h->ae_algo->ops->reset_event(h); 1623 h->ae_algo->ops->reset_event(h->pdev, h);
1559} 1624}
1560 1625
1561static const struct net_device_ops hns3_nic_netdev_ops = { 1626static const struct net_device_ops hns3_nic_netdev_ops = {
@@ -1564,18 +1629,63 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
1564 .ndo_start_xmit = hns3_nic_net_xmit, 1629 .ndo_start_xmit = hns3_nic_net_xmit,
1565 .ndo_tx_timeout = hns3_nic_net_timeout, 1630 .ndo_tx_timeout = hns3_nic_net_timeout,
1566 .ndo_set_mac_address = hns3_nic_net_set_mac_address, 1631 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
1632 .ndo_do_ioctl = hns3_nic_do_ioctl,
1567 .ndo_change_mtu = hns3_nic_change_mtu, 1633 .ndo_change_mtu = hns3_nic_change_mtu,
1568 .ndo_set_features = hns3_nic_set_features, 1634 .ndo_set_features = hns3_nic_set_features,
1569 .ndo_get_stats64 = hns3_nic_get_stats64, 1635 .ndo_get_stats64 = hns3_nic_get_stats64,
1570 .ndo_setup_tc = hns3_nic_setup_tc, 1636 .ndo_setup_tc = hns3_nic_setup_tc,
1571 .ndo_set_rx_mode = hns3_nic_set_rx_mode, 1637 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1572 .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
1573 .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
1574 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, 1638 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1575 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, 1639 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1576 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, 1640 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1577}; 1641};
1578 1642
1643static bool hns3_is_phys_func(struct pci_dev *pdev)
1644{
1645 u32 dev_id = pdev->device;
1646
1647 switch (dev_id) {
1648 case HNAE3_DEV_ID_GE:
1649 case HNAE3_DEV_ID_25GE:
1650 case HNAE3_DEV_ID_25GE_RDMA:
1651 case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1652 case HNAE3_DEV_ID_50GE_RDMA:
1653 case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1654 case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1655 return true;
1656 case HNAE3_DEV_ID_100G_VF:
1657 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1658 return false;
1659 default:
1660 dev_warn(&pdev->dev, "un-recognized pci device-id %d",
1661 dev_id);
1662 }
1663
1664 return false;
1665}
1666
1667static void hns3_disable_sriov(struct pci_dev *pdev)
1668{
1669 /* If our VFs are assigned we cannot shut down SR-IOV
1670 * without causing issues, so just leave the hardware
1671 * available but disabled
1672 */
1673 if (pci_vfs_assigned(pdev)) {
1674 dev_warn(&pdev->dev,
1675 "disabling driver while VFs are assigned\n");
1676 return;
1677 }
1678
1679 pci_disable_sriov(pdev);
1680}
1681
1682static void hns3_get_dev_capability(struct pci_dev *pdev,
1683 struct hnae3_ae_dev *ae_dev)
1684{
1685 if (pdev->revision >= 0x21)
1686 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
1687}
1688
1579/* hns3_probe - Device initialization routine 1689/* hns3_probe - Device initialization routine
1580 * @pdev: PCI device information struct 1690 * @pdev: PCI device information struct
1581 * @ent: entry in hns3_pci_tbl 1691 * @ent: entry in hns3_pci_tbl
@@ -1601,9 +1711,13 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1601 ae_dev->pdev = pdev; 1711 ae_dev->pdev = pdev;
1602 ae_dev->flag = ent->driver_data; 1712 ae_dev->flag = ent->driver_data;
1603 ae_dev->dev_type = HNAE3_DEV_KNIC; 1713 ae_dev->dev_type = HNAE3_DEV_KNIC;
1714 ae_dev->reset_type = HNAE3_NONE_RESET;
1715 hns3_get_dev_capability(pdev, ae_dev);
1604 pci_set_drvdata(pdev, ae_dev); 1716 pci_set_drvdata(pdev, ae_dev);
1605 1717
1606 return hnae3_register_ae_dev(ae_dev); 1718 hnae3_register_ae_dev(ae_dev);
1719
1720 return 0;
1607} 1721}
1608 1722
1609/* hns3_remove - Device removal routine 1723/* hns3_remove - Device removal routine
@@ -1613,20 +1727,118 @@ static void hns3_remove(struct pci_dev *pdev)
1613{ 1727{
1614 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1728 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1615 1729
1730 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1731 hns3_disable_sriov(pdev);
1732
1616 hnae3_unregister_ae_dev(ae_dev); 1733 hnae3_unregister_ae_dev(ae_dev);
1617} 1734}
1618 1735
1736/**
1737 * hns3_pci_sriov_configure
1738 * @pdev: pointer to a pci_dev structure
1739 * @num_vfs: number of VFs to allocate
1740 *
1741 * Enable or change the number of VFs. Called when the user updates the number
1742 * of VFs in sysfs.
1743 **/
1744static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1745{
1746 int ret;
1747
1748 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1749 dev_warn(&pdev->dev, "Can not config SRIOV\n");
1750 return -EINVAL;
1751 }
1752
1753 if (num_vfs) {
1754 ret = pci_enable_sriov(pdev, num_vfs);
1755 if (ret)
1756 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
1757 else
1758 return num_vfs;
1759 } else if (!pci_vfs_assigned(pdev)) {
1760 pci_disable_sriov(pdev);
1761 } else {
1762 dev_warn(&pdev->dev,
1763 "Unable to free VFs because some are assigned to VMs.\n");
1764 }
1765
1766 return 0;
1767}
1768
1769static void hns3_shutdown(struct pci_dev *pdev)
1770{
1771 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1772
1773 hnae3_unregister_ae_dev(ae_dev);
1774 devm_kfree(&pdev->dev, ae_dev);
1775 pci_set_drvdata(pdev, NULL);
1776
1777 if (system_state == SYSTEM_POWER_OFF)
1778 pci_set_power_state(pdev, PCI_D3hot);
1779}
1780
1781static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
1782 pci_channel_state_t state)
1783{
1784 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1785 pci_ers_result_t ret;
1786
1787 dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
1788
1789 if (state == pci_channel_io_perm_failure)
1790 return PCI_ERS_RESULT_DISCONNECT;
1791
1792 if (!ae_dev) {
1793 dev_err(&pdev->dev,
1794 "Can't recover - error happened during device init\n");
1795 return PCI_ERS_RESULT_NONE;
1796 }
1797
1798 if (ae_dev->ops->process_hw_error)
1799 ret = ae_dev->ops->process_hw_error(ae_dev);
1800 else
1801 return PCI_ERS_RESULT_NONE;
1802
1803 return ret;
1804}
1805
1806static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
1807{
1808 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1809 struct device *dev = &pdev->dev;
1810
1811 dev_info(dev, "requesting reset due to PCI error\n");
1812
1813 /* request the reset */
1814 if (ae_dev->ops->reset_event) {
1815 ae_dev->ops->reset_event(pdev, NULL);
1816 return PCI_ERS_RESULT_RECOVERED;
1817 }
1818
1819 return PCI_ERS_RESULT_DISCONNECT;
1820}
1821
1822static const struct pci_error_handlers hns3_err_handler = {
1823 .error_detected = hns3_error_detected,
1824 .slot_reset = hns3_slot_reset,
1825};
1826
1619static struct pci_driver hns3_driver = { 1827static struct pci_driver hns3_driver = {
1620 .name = hns3_driver_name, 1828 .name = hns3_driver_name,
1621 .id_table = hns3_pci_tbl, 1829 .id_table = hns3_pci_tbl,
1622 .probe = hns3_probe, 1830 .probe = hns3_probe,
1623 .remove = hns3_remove, 1831 .remove = hns3_remove,
1832 .shutdown = hns3_shutdown,
1833 .sriov_configure = hns3_pci_sriov_configure,
1834 .err_handler = &hns3_err_handler,
1624}; 1835};
1625 1836
1626/* set default feature to hns3 */ 1837/* set default feature to hns3 */
1627static void hns3_set_default_feature(struct net_device *netdev) 1838static void hns3_set_default_feature(struct net_device *netdev)
1628{ 1839{
1629 struct hnae3_handle *h = hns3_get_handle(netdev); 1840 struct hnae3_handle *h = hns3_get_handle(netdev);
1841 struct pci_dev *pdev = h->pdev;
1630 1842
1631 netdev->priv_flags |= IFF_UNICAST_FLT; 1843 netdev->priv_flags |= IFF_UNICAST_FLT;
1632 1844
@@ -1634,7 +1846,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
1634 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 1846 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1635 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1847 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1636 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1848 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1637 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1849 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1638 1850
1639 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; 1851 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1640 1852
@@ -1646,31 +1858,36 @@ static void hns3_set_default_feature(struct net_device *netdev)
1646 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 1858 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1647 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1859 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1648 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1860 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1649 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1861 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1650 1862
1651 netdev->vlan_features |= 1863 netdev->vlan_features |=
1652 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | 1864 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1653 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | 1865 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1654 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1866 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1655 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1867 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1656 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1868 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1657 1869
1658 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1870 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1659 NETIF_F_HW_VLAN_CTAG_TX | 1871 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1660 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | 1872 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1661 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | 1873 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1662 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | 1874 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1663 NETIF_F_GSO_UDP_TUNNEL_CSUM; 1875 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1664 1876
1665 if (!(h->flags & HNAE3_SUPPORT_VF)) 1877 if (pdev->revision >= 0x21) {
1666 netdev->hw_features |= 1878 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1667 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; 1879
1880 if (!(h->flags & HNAE3_SUPPORT_VF)) {
1881 netdev->hw_features |= NETIF_F_NTUPLE;
1882 netdev->features |= NETIF_F_NTUPLE;
1883 }
1884 }
1668} 1885}
1669 1886
1670static int hns3_alloc_buffer(struct hns3_enet_ring *ring, 1887static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1671 struct hns3_desc_cb *cb) 1888 struct hns3_desc_cb *cb)
1672{ 1889{
1673 unsigned int order = hnae_page_order(ring); 1890 unsigned int order = hnae3_page_order(ring);
1674 struct page *p; 1891 struct page *p;
1675 1892
1676 p = dev_alloc_pages(order); 1893 p = dev_alloc_pages(order);
@@ -1681,7 +1898,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1681 cb->page_offset = 0; 1898 cb->page_offset = 0;
1682 cb->reuse_flag = 0; 1899 cb->reuse_flag = 0;
1683 cb->buf = page_address(p); 1900 cb->buf = page_address(p);
1684 cb->length = hnae_page_size(ring); 1901 cb->length = hnae3_page_size(ring);
1685 cb->type = DESC_TYPE_PAGE; 1902 cb->type = DESC_TYPE_PAGE;
1686 1903
1687 return 0; 1904 return 0;
@@ -1702,7 +1919,7 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1702 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, 1919 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1703 cb->length, ring_to_dma_dir(ring)); 1920 cb->length, ring_to_dma_dir(ring));
1704 1921
1705 if (dma_mapping_error(ring_to_dev(ring), cb->dma)) 1922 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
1706 return -EIO; 1923 return -EIO;
1707 1924
1708 return 0; 1925 return 0;
@@ -1714,7 +1931,7 @@ static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1714 if (cb->type == DESC_TYPE_SKB) 1931 if (cb->type == DESC_TYPE_SKB)
1715 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, 1932 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1716 ring_to_dma_dir(ring)); 1933 ring_to_dma_dir(ring));
1717 else 1934 else if (cb->length)
1718 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, 1935 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1719 ring_to_dma_dir(ring)); 1936 ring_to_dma_dir(ring));
1720} 1937}
@@ -1747,33 +1964,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
1747/* free desc along with its attached buffer */ 1964/* free desc along with its attached buffer */
1748static void hns3_free_desc(struct hns3_enet_ring *ring) 1965static void hns3_free_desc(struct hns3_enet_ring *ring)
1749{ 1966{
1967 int size = ring->desc_num * sizeof(ring->desc[0]);
1968
1750 hns3_free_buffers(ring); 1969 hns3_free_buffers(ring);
1751 1970
1752 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, 1971 if (ring->desc) {
1753 ring->desc_num * sizeof(ring->desc[0]), 1972 dma_free_coherent(ring_to_dev(ring), size,
1754 DMA_BIDIRECTIONAL); 1973 ring->desc, ring->desc_dma_addr);
1755 ring->desc_dma_addr = 0; 1974 ring->desc = NULL;
1756 kfree(ring->desc); 1975 }
1757 ring->desc = NULL;
1758} 1976}
1759 1977
1760static int hns3_alloc_desc(struct hns3_enet_ring *ring) 1978static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1761{ 1979{
1762 int size = ring->desc_num * sizeof(ring->desc[0]); 1980 int size = ring->desc_num * sizeof(ring->desc[0]);
1763 1981
1764 ring->desc = kzalloc(size, GFP_KERNEL); 1982 ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
1983 &ring->desc_dma_addr,
1984 GFP_KERNEL);
1765 if (!ring->desc) 1985 if (!ring->desc)
1766 return -ENOMEM; 1986 return -ENOMEM;
1767 1987
1768 ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1769 size, DMA_BIDIRECTIONAL);
1770 if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1771 ring->desc_dma_addr = 0;
1772 kfree(ring->desc);
1773 ring->desc = NULL;
1774 return -ENOMEM;
1775 }
1776
1777 return 0; 1988 return 0;
1778} 1989}
1779 1990
@@ -1836,6 +2047,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1836 hns3_unmap_buffer(ring, &ring->desc_cb[i]); 2047 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1837 ring->desc_cb[i] = *res_cb; 2048 ring->desc_cb[i] = *res_cb;
1838 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); 2049 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2050 ring->desc[i].rx.bd_base_info = 0;
1839} 2051}
1840 2052
1841static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) 2053static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
@@ -1843,6 +2055,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1843 ring->desc_cb[i].reuse_flag = 0; 2055 ring->desc_cb[i].reuse_flag = 0;
1844 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma 2056 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1845 + ring->desc_cb[i].page_offset); 2057 + ring->desc_cb[i].page_offset);
2058 ring->desc[i].rx.bd_base_info = 0;
1846} 2059}
1847 2060
1848static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, 2061static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
@@ -1852,7 +2065,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1852 2065
1853 (*pkts) += (desc_cb->type == DESC_TYPE_SKB); 2066 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1854 (*bytes) += desc_cb->length; 2067 (*bytes) += desc_cb->length;
1855 /* desc_cb will be cleaned after hnae_free_buffer_detach */ 2068 /* desc_cb will be cleaned after hnae3_free_buffer_detach */
1856 hns3_free_buffer_detach(ring, ring->next_to_clean); 2069 hns3_free_buffer_detach(ring, ring->next_to_clean);
1857 2070
1858 ring_ptr_move_fw(ring, next_to_clean); 2071 ring_ptr_move_fw(ring, next_to_clean);
@@ -1869,9 +2082,10 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1869 return u > c ? (h > c && h <= u) : (h > c || h <= u); 2082 return u > c ? (h > c && h <= u) : (h > c || h <= u);
1870} 2083}
1871 2084
1872bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) 2085void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
1873{ 2086{
1874 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; 2087 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2088 struct hns3_nic_priv *priv = netdev_priv(netdev);
1875 struct netdev_queue *dev_queue; 2089 struct netdev_queue *dev_queue;
1876 int bytes, pkts; 2090 int bytes, pkts;
1877 int head; 2091 int head;
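
For context, the is_valid_clean_head() check kept above treats the TX ring as circular: with next_to_clean c and next_to_use u, a hardware-reported head h is plausible only if it lies in the ring-order interval (c, u] and does not exceed desc_num. A minimal standalone sketch of the same predicate, using hypothetical values rather than driver state:

#include <assert.h>
#include <stdbool.h>

/* Mirrors the predicate in is_valid_clean_head(): walking the ring forward
 * from next_to_clean (c), a valid head (h) must fall in the range (c, u],
 * where u is next_to_use; the range may wrap past the end of the ring.
 */
static bool head_in_clean_range(int u, int c, int h, int desc_num)
{
	if (h > desc_num)
		return false;

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

int main(void)
{
	/* non-wrapped case: on an 8-entry ring with c = 2 and u = 6,
	 * valid heads are 3..6
	 */
	assert(head_in_clean_range(6, 2, 4, 8));
	assert(!head_in_clean_range(6, 2, 7, 8));

	/* wrapped case: c = 5 and u = 2, so valid heads are 6, 7, 0, 1, 2 */
	assert(head_in_clean_range(2, 5, 7, 8));
	assert(head_in_clean_range(2, 5, 1, 8));
	assert(!head_in_clean_range(2, 5, 4, 8));

	return 0;
}
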
@@ -1880,25 +2094,24 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1880 rmb(); /* Make sure head is ready before touching any data */ 2094 rmb(); /* Make sure head is ready before touching any data */
1881 2095
1882 if (is_ring_empty(ring) || head == ring->next_to_clean) 2096 if (is_ring_empty(ring) || head == ring->next_to_clean)
1883 return true; /* no data to poll */ 2097 return; /* no data to poll */
1884 2098
1885 if (!is_valid_clean_head(ring, head)) { 2099 if (unlikely(!is_valid_clean_head(ring, head))) {
1886 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, 2100 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1887 ring->next_to_use, ring->next_to_clean); 2101 ring->next_to_use, ring->next_to_clean);
1888 2102
1889 u64_stats_update_begin(&ring->syncp); 2103 u64_stats_update_begin(&ring->syncp);
1890 ring->stats.io_err_cnt++; 2104 ring->stats.io_err_cnt++;
1891 u64_stats_update_end(&ring->syncp); 2105 u64_stats_update_end(&ring->syncp);
1892 return true; 2106 return;
1893 } 2107 }
1894 2108
1895 bytes = 0; 2109 bytes = 0;
1896 pkts = 0; 2110 pkts = 0;
1897 while (head != ring->next_to_clean && budget) { 2111 while (head != ring->next_to_clean) {
1898 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); 2112 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1899 /* Issue prefetch for next Tx descriptor */ 2113 /* Issue prefetch for next Tx descriptor */
1900 prefetch(&ring->desc_cb[ring->next_to_clean]); 2114 prefetch(&ring->desc_cb[ring->next_to_clean]);
1901 budget--;
1902 } 2115 }
1903 2116
1904 ring->tqp_vector->tx_group.total_bytes += bytes; 2117 ring->tqp_vector->tx_group.total_bytes += bytes;
@@ -1918,13 +2131,12 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1918 * sees the new next_to_clean. 2131 * sees the new next_to_clean.
1919 */ 2132 */
1920 smp_mb(); 2133 smp_mb();
1921 if (netif_tx_queue_stopped(dev_queue)) { 2134 if (netif_tx_queue_stopped(dev_queue) &&
2135 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
1922 netif_tx_wake_queue(dev_queue); 2136 netif_tx_wake_queue(dev_queue);
1923 ring->stats.restart_queue++; 2137 ring->stats.restart_queue++;
1924 } 2138 }
1925 } 2139 }
1926
1927 return !!budget;
1928} 2140}
1929 2141
1930static int hns3_desc_unused(struct hns3_enet_ring *ring) 2142static int hns3_desc_unused(struct hns3_enet_ring *ring)
@@ -1971,125 +2183,26 @@ hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
1971 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); 2183 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1972} 2184}
1973 2185
1974/* hns3_nic_get_headlen - determine size of header for LRO/GRO
1975 * @data: pointer to the start of the headers
1976 * @max: total length of section to find headers in
1977 *
1978 * This function is meant to determine the length of headers that will
1979 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1980 * motivation of doing this is to only perform one pull for IPv4 TCP
1981 * packets so that we can do basic things like calculating the gso_size
1982 * based on the average data per packet.
1983 */
1984static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1985 unsigned int max_size)
1986{
1987 unsigned char *network;
1988 u8 hlen;
1989
1990 /* This should never happen, but better safe than sorry */
1991 if (max_size < ETH_HLEN)
1992 return max_size;
1993
1994 /* Initialize network frame pointer */
1995 network = data;
1996
1997 /* Set first protocol and move network header forward */
1998 network += ETH_HLEN;
1999
2000 /* Handle any vlan tag if present */
2001 if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
2002 == HNS3_RX_FLAG_VLAN_PRESENT) {
2003 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
2004 return max_size;
2005
2006 network += VLAN_HLEN;
2007 }
2008
2009 /* Handle L3 protocols */
2010 if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
2011 == HNS3_RX_FLAG_L3ID_IPV4) {
2012 if ((typeof(max_size))(network - data) >
2013 (max_size - sizeof(struct iphdr)))
2014 return max_size;
2015
2016 /* Access ihl as a u8 to avoid unaligned access on ia64 */
2017 hlen = (network[0] & 0x0F) << 2;
2018
2019 /* Verify hlen meets minimum size requirements */
2020 if (hlen < sizeof(struct iphdr))
2021 return network - data;
2022
2023 /* Record next protocol if header is present */
2024 } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
2025 == HNS3_RX_FLAG_L3ID_IPV6) {
2026 if ((typeof(max_size))(network - data) >
2027 (max_size - sizeof(struct ipv6hdr)))
2028 return max_size;
2029
2030 /* Record next protocol */
2031 hlen = sizeof(struct ipv6hdr);
2032 } else {
2033 return network - data;
2034 }
2035
2036 /* Relocate pointer to start of L4 header */
2037 network += hlen;
2038
2039 /* Finally sort out TCP/UDP */
2040 if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
2041 == HNS3_RX_FLAG_L4ID_TCP) {
2042 if ((typeof(max_size))(network - data) >
2043 (max_size - sizeof(struct tcphdr)))
2044 return max_size;
2045
2046 /* Access doff as a u8 to avoid unaligned access on ia64 */
2047 hlen = (network[12] & 0xF0) >> 2;
2048
2049 /* Verify hlen meets minimum size requirements */
2050 if (hlen < sizeof(struct tcphdr))
2051 return network - data;
2052
2053 network += hlen;
2054 } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
2055 == HNS3_RX_FLAG_L4ID_UDP) {
2056 if ((typeof(max_size))(network - data) >
2057 (max_size - sizeof(struct udphdr)))
2058 return max_size;
2059
2060 network += sizeof(struct udphdr);
2061 }
2062
2063 /* If everything has gone correctly network should be the
2064 * data section of the packet and will be the end of the header.
2065 * If not then it probably represents the end of the last recognized
2066 * header.
2067 */
2068 if ((typeof(max_size))(network - data) < max_size)
2069 return network - data;
2070 else
2071 return max_size;
2072}
2073
2074static void hns3_nic_reuse_page(struct sk_buff *skb, int i, 2186static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2075 struct hns3_enet_ring *ring, int pull_len, 2187 struct hns3_enet_ring *ring, int pull_len,
2076 struct hns3_desc_cb *desc_cb) 2188 struct hns3_desc_cb *desc_cb)
2077{ 2189{
2078 struct hns3_desc *desc; 2190 struct hns3_desc *desc;
2079 int truesize, size; 2191 u32 truesize;
2192 int size;
2080 int last_offset; 2193 int last_offset;
2081 bool twobufs; 2194 bool twobufs;
2082 2195
2083 twobufs = ((PAGE_SIZE < 8192) && 2196 twobufs = ((PAGE_SIZE < 8192) &&
2084 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048); 2197 hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2085 2198
2086 desc = &ring->desc[ring->next_to_clean]; 2199 desc = &ring->desc[ring->next_to_clean];
2087 size = le16_to_cpu(desc->rx.size); 2200 size = le16_to_cpu(desc->rx.size);
2088 2201
2089 truesize = hnae_buf_size(ring); 2202 truesize = hnae3_buf_size(ring);
2090 2203
2091 if (!twobufs) 2204 if (!twobufs)
2092 last_offset = hnae_page_size(ring) - hnae_buf_size(ring); 2205 last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
2093 2206
2094 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, 2207 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2095 size - pull_len, truesize); 2208 size - pull_len, truesize);
@@ -2141,14 +2254,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2141 return; 2254 return;
2142 2255
2143 /* check if hardware has done checksum */ 2256 /* check if hardware has done checksum */
2144 if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) 2257 if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
2145 return; 2258 return;
2146 2259
2147 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) || 2260 if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
2148 hnae_get_bit(l234info, HNS3_RXD_L4E_B) || 2261 hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
2149 hnae_get_bit(l234info, HNS3_RXD_OL3E_B) || 2262 hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2150 hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) { 2263 hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
2151 netdev_err(netdev, "L3/L4 error pkt\n");
2152 u64_stats_update_begin(&ring->syncp); 2264 u64_stats_update_begin(&ring->syncp);
2153 ring->stats.l3l4_csum_err++; 2265 ring->stats.l3l4_csum_err++;
2154 u64_stats_update_end(&ring->syncp); 2266 u64_stats_update_end(&ring->syncp);
@@ -2156,25 +2268,29 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2156 return; 2268 return;
2157 } 2269 }
2158 2270
2159 l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M, 2271 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2160 HNS3_RXD_L3ID_S); 2272 HNS3_RXD_L3ID_S);
2161 l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M, 2273 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2162 HNS3_RXD_L4ID_S); 2274 HNS3_RXD_L4ID_S);
2163 2275
2164 ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); 2276 ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
2277 HNS3_RXD_OL4ID_S);
2165 switch (ol4_type) { 2278 switch (ol4_type) {
2166 case HNS3_OL4_TYPE_MAC_IN_UDP: 2279 case HNS3_OL4_TYPE_MAC_IN_UDP:
2167 case HNS3_OL4_TYPE_NVGRE: 2280 case HNS3_OL4_TYPE_NVGRE:
2168 skb->csum_level = 1; 2281 skb->csum_level = 1;
2282 /* fall through */
2169 case HNS3_OL4_TYPE_NO_TUN: 2283 case HNS3_OL4_TYPE_NO_TUN:
2170 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ 2284 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2171 if (l3_type == HNS3_L3_TYPE_IPV4 || 2285 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2172 (l3_type == HNS3_L3_TYPE_IPV6 && 2286 l3_type == HNS3_L3_TYPE_IPV6) &&
2173 (l4_type == HNS3_L4_TYPE_UDP || 2287 (l4_type == HNS3_L4_TYPE_UDP ||
2174 l4_type == HNS3_L4_TYPE_TCP || 2288 l4_type == HNS3_L4_TYPE_TCP ||
2175 l4_type == HNS3_L4_TYPE_SCTP))) 2289 l4_type == HNS3_L4_TYPE_SCTP))
2176 skb->ip_summed = CHECKSUM_UNNECESSARY; 2290 skb->ip_summed = CHECKSUM_UNNECESSARY;
2177 break; 2291 break;
2292 default:
2293 break;
2178 } 2294 }
2179} 2295}
2180 2296
@@ -2183,6 +2299,51 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2183 napi_gro_receive(&ring->tqp_vector->napi, skb); 2299 napi_gro_receive(&ring->tqp_vector->napi, skb);
2184} 2300}
2185 2301
2302static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2303 struct hns3_desc *desc, u32 l234info,
2304 u16 *vlan_tag)
2305{
2306 struct pci_dev *pdev = ring->tqp->handle->pdev;
2307
2308 if (pdev->revision == 0x20) {
2309 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2310 if (!(*vlan_tag & VLAN_VID_MASK))
2311 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2312
2313 return (*vlan_tag != 0);
2314 }
2315
2316#define HNS3_STRP_OUTER_VLAN 0x1
2317#define HNS3_STRP_INNER_VLAN 0x2
2318
2319 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2320 HNS3_RXD_STRP_TAGP_S)) {
2321 case HNS3_STRP_OUTER_VLAN:
2322 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2323 return true;
2324 case HNS3_STRP_INNER_VLAN:
2325 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2326 return true;
2327 default:
2328 return false;
2329 }
2330}
2331
2332static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
2333 struct sk_buff *skb)
2334{
2335 struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
2336 struct hnae3_handle *handle = ring->tqp->handle;
2337 enum pkt_hash_types rss_type;
2338
2339 if (le32_to_cpu(desc->rx.rss_hash))
2340 rss_type = handle->kinfo.rss_type;
2341 else
2342 rss_type = PKT_HASH_TYPE_NONE;
2343
2344 skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
2345}
2346
2186static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, 2347static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2187 struct sk_buff **out_skb, int *out_bnum) 2348 struct sk_buff **out_skb, int *out_bnum)
2188{ 2349{
@@ -2202,12 +2363,11 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2202 2363
2203 prefetch(desc); 2364 prefetch(desc);
2204 2365
2205 length = le16_to_cpu(desc->rx.pkt_len); 2366 length = le16_to_cpu(desc->rx.size);
2206 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2367 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2207 l234info = le32_to_cpu(desc->rx.l234_info);
2208 2368
2209 /* Check valid BD */ 2369 /* Check valid BD */
2210 if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) 2370 if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
2211 return -EFAULT; 2371 return -EFAULT;
2212 2372
2213 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; 2373 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
@@ -2238,22 +2398,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2238 2398
2239 prefetchw(skb->data); 2399 prefetchw(skb->data);
2240 2400
2241 /* Based on hw strategy, the tag offloaded will be stored at
2242 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
2243 * in one layer tag case.
2244 */
2245 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2246 u16 vlan_tag;
2247
2248 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2249 if (!(vlan_tag & VLAN_VID_MASK))
2250 vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2251 if (vlan_tag & VLAN_VID_MASK)
2252 __vlan_hwaccel_put_tag(skb,
2253 htons(ETH_P_8021Q),
2254 vlan_tag);
2255 }
2256
2257 bnum = 1; 2401 bnum = 1;
2258 if (length <= HNS3_RX_HEAD_SIZE) { 2402 if (length <= HNS3_RX_HEAD_SIZE) {
2259 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); 2403 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2270,15 +2414,15 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2270 ring->stats.seg_pkt_cnt++; 2414 ring->stats.seg_pkt_cnt++;
2271 u64_stats_update_end(&ring->syncp); 2415 u64_stats_update_end(&ring->syncp);
2272 2416
2273 pull_len = hns3_nic_get_headlen(va, l234info, 2417 pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
2274 HNS3_RX_HEAD_SIZE); 2418
2275 memcpy(__skb_put(skb, pull_len), va, 2419 memcpy(__skb_put(skb, pull_len), va,
2276 ALIGN(pull_len, sizeof(long))); 2420 ALIGN(pull_len, sizeof(long)));
2277 2421
2278 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); 2422 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2279 ring_ptr_move_fw(ring, next_to_clean); 2423 ring_ptr_move_fw(ring, next_to_clean);
2280 2424
2281 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) { 2425 while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2282 desc = &ring->desc[ring->next_to_clean]; 2426 desc = &ring->desc[ring->next_to_clean];
2283 desc_cb = &ring->desc_cb[ring->next_to_clean]; 2427 desc_cb = &ring->desc_cb[ring->next_to_clean];
2284 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); 2428 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2290,9 +2434,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2290 2434
2291 *out_bnum = bnum; 2435 *out_bnum = bnum;
2292 2436
2293 if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { 2437 l234info = le32_to_cpu(desc->rx.l234_info);
2294 netdev_err(netdev, "no valid bd,%016llx,%016llx\n", 2438
2295 ((u64 *)desc)[0], ((u64 *)desc)[1]); 2439 /* Based on hw strategy, the tag offloaded will be stored at
2440 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
2441 * in one layer tag case.
2442 */
2443 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2444 u16 vlan_tag;
2445
2446 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
2447 __vlan_hwaccel_put_tag(skb,
2448 htons(ETH_P_8021Q),
2449 vlan_tag);
2450 }
2451
2452 if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2296 u64_stats_update_begin(&ring->syncp); 2453 u64_stats_update_begin(&ring->syncp);
2297 ring->stats.non_vld_descs++; 2454 ring->stats.non_vld_descs++;
2298 u64_stats_update_end(&ring->syncp); 2455 u64_stats_update_end(&ring->syncp);
@@ -2302,8 +2459,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2302 } 2459 }
2303 2460
2304 if (unlikely((!desc->rx.pkt_len) || 2461 if (unlikely((!desc->rx.pkt_len) ||
2305 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { 2462 hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2306 netdev_err(netdev, "truncated pkt\n");
2307 u64_stats_update_begin(&ring->syncp); 2463 u64_stats_update_begin(&ring->syncp);
2308 ring->stats.err_pkt_len++; 2464 ring->stats.err_pkt_len++;
2309 u64_stats_update_end(&ring->syncp); 2465 u64_stats_update_end(&ring->syncp);
@@ -2312,8 +2468,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2312 return -EFAULT; 2468 return -EFAULT;
2313 } 2469 }
2314 2470
2315 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) { 2471 if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
2316 netdev_err(netdev, "L2 error pkt\n");
2317 u64_stats_update_begin(&ring->syncp); 2472 u64_stats_update_begin(&ring->syncp);
2318 ring->stats.l2_err++; 2473 ring->stats.l2_err++;
2319 u64_stats_update_end(&ring->syncp); 2474 u64_stats_update_end(&ring->syncp);
@@ -2330,6 +2485,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2330 ring->tqp_vector->rx_group.total_bytes += skb->len; 2485 ring->tqp_vector->rx_group.total_bytes += skb->len;
2331 2486
2332 hns3_rx_checksum(ring, skb, desc); 2487 hns3_rx_checksum(ring, skb, desc);
2488 hns3_set_rx_skb_rss_type(ring, skb);
2489
2333 return 0; 2490 return 0;
2334} 2491}
2335 2492
@@ -2523,10 +2680,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2523 /* Since the actual Tx work is minimal, we can give the Tx a larger 2680 /* Since the actual Tx work is minimal, we can give the Tx a larger
2524 * budget and be more aggressive about cleaning up the Tx descriptors. 2681 * budget and be more aggressive about cleaning up the Tx descriptors.
2525 */ 2682 */
2526 hns3_for_each_ring(ring, tqp_vector->tx_group) { 2683 hns3_for_each_ring(ring, tqp_vector->tx_group)
2527 if (!hns3_clean_tx_ring(ring, budget)) 2684 hns3_clean_tx_ring(ring);
2528 clean_complete = false;
2529 }
2530 2685
2531 /* make sure rx ring budget not smaller than 1 */ 2686 /* make sure rx ring budget not smaller than 1 */
2532 rx_budget = max(budget / tqp_vector->num_tqps, 1); 2687 rx_budget = max(budget / tqp_vector->num_tqps, 1);
@@ -2565,10 +2720,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2565 tx_ring = tqp_vector->tx_group.ring; 2720 tx_ring = tqp_vector->tx_group.ring;
2566 if (tx_ring) { 2721 if (tx_ring) {
2567 cur_chain->tqp_index = tx_ring->tqp->tqp_index; 2722 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2568 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 2723 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2569 HNAE3_RING_TYPE_TX); 2724 HNAE3_RING_TYPE_TX);
2570 hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2725 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2571 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); 2726 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
2572 2727
2573 cur_chain->next = NULL; 2728 cur_chain->next = NULL;
2574 2729
@@ -2578,16 +2733,16 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2578 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), 2733 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2579 GFP_KERNEL); 2734 GFP_KERNEL);
2580 if (!chain) 2735 if (!chain)
2581 return -ENOMEM; 2736 goto err_free_chain;
2582 2737
2583 cur_chain->next = chain; 2738 cur_chain->next = chain;
2584 chain->tqp_index = tx_ring->tqp->tqp_index; 2739 chain->tqp_index = tx_ring->tqp->tqp_index;
2585 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, 2740 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2586 HNAE3_RING_TYPE_TX); 2741 HNAE3_RING_TYPE_TX);
2587 hnae_set_field(chain->int_gl_idx, 2742 hnae3_set_field(chain->int_gl_idx,
2588 HNAE3_RING_GL_IDX_M, 2743 HNAE3_RING_GL_IDX_M,
2589 HNAE3_RING_GL_IDX_S, 2744 HNAE3_RING_GL_IDX_S,
2590 HNAE3_RING_GL_TX); 2745 HNAE3_RING_GL_TX);
2591 2746
2592 cur_chain = chain; 2747 cur_chain = chain;
2593 } 2748 }
@@ -2597,10 +2752,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2597 if (!tx_ring && rx_ring) { 2752 if (!tx_ring && rx_ring) {
2598 cur_chain->next = NULL; 2753 cur_chain->next = NULL;
2599 cur_chain->tqp_index = rx_ring->tqp->tqp_index; 2754 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2600 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, 2755 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2601 HNAE3_RING_TYPE_RX); 2756 HNAE3_RING_TYPE_RX);
2602 hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2757 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2603 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 2758 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2604 2759
2605 rx_ring = rx_ring->next; 2760 rx_ring = rx_ring->next;
2606 } 2761 }
@@ -2608,14 +2763,14 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2608 while (rx_ring) { 2763 while (rx_ring) {
2609 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); 2764 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2610 if (!chain) 2765 if (!chain)
2611 return -ENOMEM; 2766 goto err_free_chain;
2612 2767
2613 cur_chain->next = chain; 2768 cur_chain->next = chain;
2614 chain->tqp_index = rx_ring->tqp->tqp_index; 2769 chain->tqp_index = rx_ring->tqp->tqp_index;
2615 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, 2770 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2616 HNAE3_RING_TYPE_RX); 2771 HNAE3_RING_TYPE_RX);
2617 hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, 2772 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2618 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); 2773 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2619 2774
2620 cur_chain = chain; 2775 cur_chain = chain;
2621 2776
@@ -2623,6 +2778,16 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2623 } 2778 }
2624 2779
2625 return 0; 2780 return 0;
2781
2782err_free_chain:
2783 cur_chain = head->next;
2784 while (cur_chain) {
2785 chain = cur_chain->next;
2786 devm_kfree(&pdev->dev, chain);
2787 cur_chain = chain;
2788 }
2789
2790 return -ENOMEM;
2626} 2791}
2627 2792
2628static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, 2793static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
@@ -2649,13 +2814,32 @@ static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2649 group->count++; 2814 group->count++;
2650} 2815}
2651 2816
2817static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
2818{
2819 struct pci_dev *pdev = priv->ae_handle->pdev;
2820 struct hns3_enet_tqp_vector *tqp_vector;
2821 int num_vectors = priv->vector_num;
2822 int numa_node;
2823 int vector_i;
2824
2825 numa_node = dev_to_node(&pdev->dev);
2826
2827 for (vector_i = 0; vector_i < num_vectors; vector_i++) {
2828 tqp_vector = &priv->tqp_vector[vector_i];
2829 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
2830 &tqp_vector->affinity_mask);
2831 }
2832}
2833
2652static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) 2834static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2653{ 2835{
2654 struct hnae3_ring_chain_node vector_ring_chain; 2836 struct hnae3_ring_chain_node vector_ring_chain;
2655 struct hnae3_handle *h = priv->ae_handle; 2837 struct hnae3_handle *h = priv->ae_handle;
2656 struct hns3_enet_tqp_vector *tqp_vector; 2838 struct hns3_enet_tqp_vector *tqp_vector;
2657 int ret = 0; 2839 int ret = 0;
2658 u16 i; 2840 int i;
2841
2842 hns3_nic_set_cpumask(priv);
2659 2843
2660 for (i = 0; i < priv->vector_num; i++) { 2844 for (i = 0; i < priv->vector_num; i++) {
2661 tqp_vector = &priv->tqp_vector[i]; 2845 tqp_vector = &priv->tqp_vector[i];
@@ -2700,13 +2884,19 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2700 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 2884 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2701 2885
2702 if (ret) 2886 if (ret)
2703 return ret; 2887 goto map_ring_fail;
2704 2888
2705 netif_napi_add(priv->netdev, &tqp_vector->napi, 2889 netif_napi_add(priv->netdev, &tqp_vector->napi,
2706 hns3_nic_common_poll, NAPI_POLL_WEIGHT); 2890 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2707 } 2891 }
2708 2892
2709 return 0; 2893 return 0;
2894
2895map_ring_fail:
2896 while (i--)
2897 netif_napi_del(&priv->tqp_vector[i].napi);
2898
2899 return ret;
2710} 2900}
2711 2901
2712static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) 2902static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
@@ -2778,10 +2968,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2778 if (ret) 2968 if (ret)
2779 return ret; 2969 return ret;
2780 2970
2781 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
2782 if (ret)
2783 return ret;
2784
2785 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); 2971 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2786 2972
2787 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { 2973 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
@@ -2842,7 +3028,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2842 ring->io_base = q->io_base; 3028 ring->io_base = q->io_base;
2843 } 3029 }
2844 3030
2845 hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); 3031 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2846 3032
2847 ring->tqp = q; 3033 ring->tqp = q;
2848 ring->desc = NULL; 3034 ring->desc = NULL;
@@ -2867,8 +3053,10 @@ static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2867 return ret; 3053 return ret;
2868 3054
2869 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); 3055 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2870 if (ret) 3056 if (ret) {
3057 devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
2871 return ret; 3058 return ret;
3059 }
2872 3060
2873 return 0; 3061 return 0;
2874} 3062}
@@ -2879,8 +3067,10 @@ static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2879 struct pci_dev *pdev = h->pdev; 3067 struct pci_dev *pdev = h->pdev;
2880 int i, ret; 3068 int i, ret;
2881 3069
2882 priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps * 3070 priv->ring_data = devm_kzalloc(&pdev->dev,
2883 sizeof(*priv->ring_data) * 2, 3071 array3_size(h->kinfo.num_tqps,
3072 sizeof(*priv->ring_data),
3073 2),
2884 GFP_KERNEL); 3074 GFP_KERNEL);
2885 if (!priv->ring_data) 3075 if (!priv->ring_data)
2886 return -ENOMEM; 3076 return -ENOMEM;
@@ -2893,6 +3083,12 @@ static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2893 3083
2894 return 0; 3084 return 0;
2895err: 3085err:
3086 while (i--) {
3087 devm_kfree(priv->dev, priv->ring_data[i].ring);
3088 devm_kfree(priv->dev,
3089 priv->ring_data[i + h->kinfo.num_tqps].ring);
3090 }
3091
2896 devm_kfree(&pdev->dev, priv->ring_data); 3092 devm_kfree(&pdev->dev, priv->ring_data);
2897 return ret; 3093 return ret;
2898} 3094}
@@ -3000,13 +3196,33 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
3000 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, 3196 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
3001 (u32)((dma >> 31) >> 1)); 3197 (u32)((dma >> 31) >> 1));
3002 3198
3003 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
3004 hns3_buf_size2type(ring->buf_size));
3005 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, 3199 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
3006 ring->desc_num / 8 - 1); 3200 ring->desc_num / 8 - 1);
3007 } 3201 }
3008} 3202}
3009 3203
3204static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
3205{
3206 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3207 int i;
3208
3209 for (i = 0; i < HNAE3_MAX_TC; i++) {
3210 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3211 int j;
3212
3213 if (!tc_info->enable)
3214 continue;
3215
3216 for (j = 0; j < tc_info->tqp_count; j++) {
3217 struct hnae3_queue *q;
3218
3219 q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
3220 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
3221 tc_info->tc);
3222 }
3223 }
3224}
3225
3010int hns3_init_all_ring(struct hns3_nic_priv *priv) 3226int hns3_init_all_ring(struct hns3_nic_priv *priv)
3011{ 3227{
3012 struct hnae3_handle *h = priv->ae_handle; 3228 struct hnae3_handle *h = priv->ae_handle;
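
The hns3_init_tx_ring_tc() helper added above re-programs each TX queue's TC register after a queue reset, using the (tqp_offset, tqp_count) pairs in kinfo->tc_info to describe which contiguous block of queues each enabled TC owns. A standalone sketch of that partitioning with hypothetical numbers (two TCs over eight queues), just to illustrate the mapping the loop relies on:

#include <stdio.h>

/* Simplified stand-in for struct hnae3_tc_info; only the fields the
 * TC/queue mapping needs, filled with made-up example values below. */
struct tc_info {
	int enable;
	int tc;          /* hardware TC number */
	int tqp_offset;  /* first queue owned by this TC */
	int tqp_count;   /* number of queues owned by this TC */
};

int main(void)
{
	/* hypothetical layout: TC0 -> queues 0..3, TC1 -> queues 4..7 */
	struct tc_info tcs[2] = {
		{ .enable = 1, .tc = 0, .tqp_offset = 0, .tqp_count = 4 },
		{ .enable = 1, .tc = 1, .tqp_offset = 4, .tqp_count = 4 },
	};
	int i, j;

	for (i = 0; i < 2; i++) {
		if (!tcs[i].enable)
			continue;
		for (j = 0; j < tcs[i].tqp_count; j++)
			printf("TX queue %d -> TC %d\n",
			       tcs[i].tqp_offset + j, tcs[i].tc);
	}

	return 0;
}
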
@@ -3022,8 +3238,6 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
3022 goto out_when_alloc_ring_memory; 3238 goto out_when_alloc_ring_memory;
3023 } 3239 }
3024 3240
3025 hns3_init_ring_hw(priv->ring_data[i].ring);
3026
3027 u64_stats_init(&priv->ring_data[i].ring->syncp); 3241 u64_stats_init(&priv->ring_data[i].ring->syncp);
3028 } 3242 }
3029 3243
@@ -3042,9 +3256,6 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3042 int i; 3256 int i;
3043 3257
3044 for (i = 0; i < h->kinfo.num_tqps; i++) { 3258 for (i = 0; i < h->kinfo.num_tqps; i++) {
3045 if (h->ae_algo->ops->reset_queue)
3046 h->ae_algo->ops->reset_queue(h, i);
3047
3048 hns3_fini_ring(priv->ring_data[i].ring); 3259 hns3_fini_ring(priv->ring_data[i].ring);
3049 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); 3260 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3050 } 3261 }
@@ -3052,13 +3263,14 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3052} 3263}
3053 3264
3054/* Set mac addr if it is configured. or leave it to the AE driver */ 3265/* Set mac addr if it is configured. or leave it to the AE driver */
3055static void hns3_init_mac_addr(struct net_device *netdev) 3266static int hns3_init_mac_addr(struct net_device *netdev, bool init)
3056{ 3267{
3057 struct hns3_nic_priv *priv = netdev_priv(netdev); 3268 struct hns3_nic_priv *priv = netdev_priv(netdev);
3058 struct hnae3_handle *h = priv->ae_handle; 3269 struct hnae3_handle *h = priv->ae_handle;
3059 u8 mac_addr_temp[ETH_ALEN]; 3270 u8 mac_addr_temp[ETH_ALEN];
3271 int ret = 0;
3060 3272
3061 if (h->ae_algo->ops->get_mac_addr) { 3273 if (h->ae_algo->ops->get_mac_addr && init) {
3062 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); 3274 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3063 ether_addr_copy(netdev->dev_addr, mac_addr_temp); 3275 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3064 } 3276 }
@@ -3071,33 +3283,53 @@ static void hns3_init_mac_addr(struct net_device *netdev)
3071 } 3283 }
3072 3284
3073 if (h->ae_algo->ops->set_mac_addr) 3285 if (h->ae_algo->ops->set_mac_addr)
3074 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); 3286 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3075 3287
3288 return ret;
3289}
3290
3291static int hns3_restore_fd_rules(struct net_device *netdev)
3292{
3293 struct hnae3_handle *h = hns3_get_handle(netdev);
3294 int ret = 0;
3295
3296 if (h->ae_algo->ops->restore_fd_rules)
3297 ret = h->ae_algo->ops->restore_fd_rules(h);
3298
3299 return ret;
3300}
3301
3302static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
3303{
3304 struct hnae3_handle *h = hns3_get_handle(netdev);
3305
3306 if (h->ae_algo->ops->del_all_fd_entries)
3307 h->ae_algo->ops->del_all_fd_entries(h, clear_list);
3076} 3308}
3077 3309
3078static void hns3_nic_set_priv_ops(struct net_device *netdev) 3310static void hns3_nic_set_priv_ops(struct net_device *netdev)
3079{ 3311{
3080 struct hns3_nic_priv *priv = netdev_priv(netdev); 3312 struct hns3_nic_priv *priv = netdev_priv(netdev);
3081 3313
3314 priv->ops.fill_desc = hns3_fill_desc;
3082 if ((netdev->features & NETIF_F_TSO) || 3315 if ((netdev->features & NETIF_F_TSO) ||
3083 (netdev->features & NETIF_F_TSO6)) { 3316 (netdev->features & NETIF_F_TSO6))
3084 priv->ops.fill_desc = hns3_fill_desc_tso;
3085 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; 3317 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
3086 } else { 3318 else
3087 priv->ops.fill_desc = hns3_fill_desc;
3088 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; 3319 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
3089 }
3090} 3320}
3091 3321
3092static int hns3_client_init(struct hnae3_handle *handle) 3322static int hns3_client_init(struct hnae3_handle *handle)
3093{ 3323{
3094 struct pci_dev *pdev = handle->pdev; 3324 struct pci_dev *pdev = handle->pdev;
3325 u16 alloc_tqps, max_rss_size;
3095 struct hns3_nic_priv *priv; 3326 struct hns3_nic_priv *priv;
3096 struct net_device *netdev; 3327 struct net_device *netdev;
3097 int ret; 3328 int ret;
3098 3329
3099 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), 3330 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
3100 hns3_get_max_available_channels(handle)); 3331 &max_rss_size);
3332 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
3101 if (!netdev) 3333 if (!netdev)
3102 return -ENOMEM; 3334 return -ENOMEM;
3103 3335
@@ -3105,14 +3337,13 @@ static int hns3_client_init(struct hnae3_handle *handle)
3105 priv->dev = &pdev->dev; 3337 priv->dev = &pdev->dev;
3106 priv->netdev = netdev; 3338 priv->netdev = netdev;
3107 priv->ae_handle = handle; 3339 priv->ae_handle = handle;
3108 priv->ae_handle->reset_level = HNAE3_NONE_RESET;
3109 priv->ae_handle->last_reset_time = jiffies; 3340 priv->ae_handle->last_reset_time = jiffies;
3110 priv->tx_timeout_count = 0; 3341 priv->tx_timeout_count = 0;
3111 3342
3112 handle->kinfo.netdev = netdev; 3343 handle->kinfo.netdev = netdev;
3113 handle->priv = (void *)priv; 3344 handle->priv = (void *)priv;
3114 3345
3115 hns3_init_mac_addr(netdev); 3346 hns3_init_mac_addr(netdev, true);
3116 3347
3117 hns3_set_default_feature(netdev); 3348 hns3_set_default_feature(netdev);
3118 3349
@@ -3126,6 +3357,11 @@ static int hns3_client_init(struct hnae3_handle *handle)
3126 /* Carrier off reporting is important to ethtool even BEFORE open */ 3357 /* Carrier off reporting is important to ethtool even BEFORE open */
3127 netif_carrier_off(netdev); 3358 netif_carrier_off(netdev);
3128 3359
3360 if (handle->flags & HNAE3_SUPPORT_VF)
3361 handle->reset_level = HNAE3_VF_RESET;
3362 else
3363 handle->reset_level = HNAE3_FUNC_RESET;
3364
3129 ret = hns3_get_ring_config(priv); 3365 ret = hns3_get_ring_config(priv);
3130 if (ret) { 3366 if (ret) {
3131 ret = -ENOMEM; 3367 ret = -ENOMEM;
@@ -3182,9 +3418,15 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3182 struct hns3_nic_priv *priv = netdev_priv(netdev); 3418 struct hns3_nic_priv *priv = netdev_priv(netdev);
3183 int ret; 3419 int ret;
3184 3420
3421 hns3_remove_hw_addr(netdev);
3422
3185 if (netdev->reg_state != NETREG_UNINITIALIZED) 3423 if (netdev->reg_state != NETREG_UNINITIALIZED)
3186 unregister_netdev(netdev); 3424 unregister_netdev(netdev);
3187 3425
3426 hns3_del_all_fd_rules(netdev, true);
3427
3428 hns3_force_clear_all_rx_ring(handle);
3429
3188 ret = hns3_nic_uninit_vector_data(priv); 3430 ret = hns3_nic_uninit_vector_data(priv);
3189 if (ret) 3431 if (ret)
3190 netdev_err(netdev, "uninit vector error\n"); 3432 netdev_err(netdev, "uninit vector error\n");
@@ -3228,7 +3470,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3228 struct net_device *ndev = kinfo->netdev; 3470 struct net_device *ndev = kinfo->netdev;
3229 bool if_running; 3471 bool if_running;
3230 int ret; 3472 int ret;
3231 u8 i;
3232 3473
3233 if (tc > HNAE3_MAX_TC) 3474 if (tc > HNAE3_MAX_TC)
3234 return -EINVAL; 3475 return -EINVAL;
@@ -3238,10 +3479,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3238 3479
3239 if_running = netif_running(ndev); 3480 if_running = netif_running(ndev);
3240 3481
3241 ret = netdev_set_num_tc(ndev, tc);
3242 if (ret)
3243 return ret;
3244
3245 if (if_running) { 3482 if (if_running) {
3246 (void)hns3_nic_net_stop(ndev); 3483 (void)hns3_nic_net_stop(ndev);
3247 msleep(100); 3484 msleep(100);
@@ -3252,27 +3489,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3252 if (ret) 3489 if (ret)
3253 goto err_out; 3490 goto err_out;
3254 3491
3255 if (tc <= 1) {
3256 netdev_reset_tc(ndev);
3257 goto out;
3258 }
3259
3260 for (i = 0; i < HNAE3_MAX_TC; i++) {
3261 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3262
3263 if (tc_info->enable)
3264 netdev_set_tc_queue(ndev,
3265 tc_info->tc,
3266 tc_info->tqp_count,
3267 tc_info->tqp_offset);
3268 }
3269
3270 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
3271 netdev_set_prio_tc_map(ndev, i,
3272 kinfo->prio_tc[i]);
3273 }
3274
3275out:
3276 ret = hns3_nic_set_real_num_queue(ndev); 3492 ret = hns3_nic_set_real_num_queue(ndev);
3277 3493
3278err_out: 3494err_out:
@@ -3282,25 +3498,120 @@ err_out:
3282 return ret; 3498 return ret;
3283} 3499}
3284 3500
3285static void hns3_recover_hw_addr(struct net_device *ndev) 3501static int hns3_recover_hw_addr(struct net_device *ndev)
3286{ 3502{
3287 struct netdev_hw_addr_list *list; 3503 struct netdev_hw_addr_list *list;
3288 struct netdev_hw_addr *ha, *tmp; 3504 struct netdev_hw_addr *ha, *tmp;
3505 int ret = 0;
3289 3506
3290 /* go through and sync uc_addr entries to the device */ 3507 /* go through and sync uc_addr entries to the device */
3291 list = &ndev->uc; 3508 list = &ndev->uc;
3292 list_for_each_entry_safe(ha, tmp, &list->list, list) 3509 list_for_each_entry_safe(ha, tmp, &list->list, list) {
3293 hns3_nic_uc_sync(ndev, ha->addr); 3510 ret = hns3_nic_uc_sync(ndev, ha->addr);
3511 if (ret)
3512 return ret;
3513 }
3294 3514
3295 /* go through and sync mc_addr entries to the device */ 3515 /* go through and sync mc_addr entries to the device */
3296 list = &ndev->mc; 3516 list = &ndev->mc;
3517 list_for_each_entry_safe(ha, tmp, &list->list, list) {
3518 ret = hns3_nic_mc_sync(ndev, ha->addr);
3519 if (ret)
3520 return ret;
3521 }
3522
3523 return ret;
3524}
3525
3526static void hns3_remove_hw_addr(struct net_device *netdev)
3527{
3528 struct netdev_hw_addr_list *list;
3529 struct netdev_hw_addr *ha, *tmp;
3530
3531 hns3_nic_uc_unsync(netdev, netdev->dev_addr);
3532
3533 /* go through and unsync uc_addr entries to the device */
3534 list = &netdev->uc;
3535 list_for_each_entry_safe(ha, tmp, &list->list, list)
3536 hns3_nic_uc_unsync(netdev, ha->addr);
3537
3538 /* go through and unsync mc_addr entries to the device */
3539 list = &netdev->mc;
3297 list_for_each_entry_safe(ha, tmp, &list->list, list) 3540 list_for_each_entry_safe(ha, tmp, &list->list, list)
3298 hns3_nic_mc_sync(ndev, ha->addr); 3541 if (ha->refcount > 1)
3542 hns3_nic_mc_unsync(netdev, ha->addr);
3299} 3543}
3300 3544
3301static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb) 3545static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
3302{ 3546{
3303 dev_kfree_skb_any(skb); 3547 while (ring->next_to_clean != ring->next_to_use) {
3548 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
3549 hns3_free_buffer_detach(ring, ring->next_to_clean);
3550 ring_ptr_move_fw(ring, next_to_clean);
3551 }
3552}
3553
3554static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
3555{
3556 struct hns3_desc_cb res_cbs;
3557 int ret;
3558
3559 while (ring->next_to_use != ring->next_to_clean) {
3560 /* When a buffer is not reused, its memory has been
3561 * freed in hns3_handle_rx_bd or will be freed by
3562 * the stack, so we need to replace the buffer here.
3563 */
3564 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3565 ret = hns3_reserve_buffer_map(ring, &res_cbs);
3566 if (ret) {
3567 u64_stats_update_begin(&ring->syncp);
3568 ring->stats.sw_err_cnt++;
3569 u64_stats_update_end(&ring->syncp);
3570 /* if allocating a new buffer fails, exit directly
3571 * and re-clear in the up flow.
3572 */
3573 netdev_warn(ring->tqp->handle->kinfo.netdev,
3574 "reserve buffer map failed, ret = %d\n",
3575 ret);
3576 return ret;
3577 }
3578 hns3_replace_buffer(ring, ring->next_to_use,
3579 &res_cbs);
3580 }
3581 ring_ptr_move_fw(ring, next_to_use);
3582 }
3583
3584 return 0;
3585}
3586
3587static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
3588{
3589 while (ring->next_to_use != ring->next_to_clean) {
3590 /* When a buffer is not reused, its memory has been
3591 * freed in hns3_handle_rx_bd or will be freed by
3592 * the stack, so we only need to unmap the buffer here.
3593 */
3594 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3595 hns3_unmap_buffer(ring,
3596 &ring->desc_cb[ring->next_to_use]);
3597 ring->desc_cb[ring->next_to_use].dma = 0;
3598 }
3599
3600 ring_ptr_move_fw(ring, next_to_use);
3601 }
3602}
3603
3604static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
3605{
3606 struct net_device *ndev = h->kinfo.netdev;
3607 struct hns3_nic_priv *priv = netdev_priv(ndev);
3608 struct hns3_enet_ring *ring;
3609 u32 i;
3610
3611 for (i = 0; i < h->kinfo.num_tqps; i++) {
3612 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3613 hns3_force_clear_rx_ring(ring);
3614 }
3304} 3615}
3305 3616
3306static void hns3_clear_all_ring(struct hnae3_handle *h) 3617static void hns3_clear_all_ring(struct hnae3_handle *h)
@@ -3314,13 +3625,84 @@ static void hns3_clear_all_ring(struct hnae3_handle *h)
3314 struct hns3_enet_ring *ring; 3625 struct hns3_enet_ring *ring;
3315 3626
3316 ring = priv->ring_data[i].ring; 3627 ring = priv->ring_data[i].ring;
3317 hns3_clean_tx_ring(ring, ring->desc_num); 3628 hns3_clear_tx_ring(ring);
3318 dev_queue = netdev_get_tx_queue(ndev, 3629 dev_queue = netdev_get_tx_queue(ndev,
3319 priv->ring_data[i].queue_index); 3630 priv->ring_data[i].queue_index);
3320 netdev_tx_reset_queue(dev_queue); 3631 netdev_tx_reset_queue(dev_queue);
3321 3632
3322 ring = priv->ring_data[i + h->kinfo.num_tqps].ring; 3633 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3323 hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data); 3634 /* Continue to clear other rings even if clearing some
3635 * rings failed.
3636 */
3637 hns3_clear_rx_ring(ring);
3638 }
3639}
3640
3641int hns3_nic_reset_all_ring(struct hnae3_handle *h)
3642{
3643 struct net_device *ndev = h->kinfo.netdev;
3644 struct hns3_nic_priv *priv = netdev_priv(ndev);
3645 struct hns3_enet_ring *rx_ring;
3646 int i, j;
3647 int ret;
3648
3649 for (i = 0; i < h->kinfo.num_tqps; i++) {
3650 ret = h->ae_algo->ops->reset_queue(h, i);
3651 if (ret)
3652 return ret;
3653
3654 hns3_init_ring_hw(priv->ring_data[i].ring);
3655
3656 /* We need to clear the tx ring here because the self test will
3657 * use the ring and will not run down before up.
3658 */
3659 hns3_clear_tx_ring(priv->ring_data[i].ring);
3660 priv->ring_data[i].ring->next_to_clean = 0;
3661 priv->ring_data[i].ring->next_to_use = 0;
3662
3663 rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3664 hns3_init_ring_hw(rx_ring);
3665 ret = hns3_clear_rx_ring(rx_ring);
3666 if (ret)
3667 return ret;
3668
3669 /* We cannot know the hardware head and tail when this
3670 * function is called in the reset flow, so we reuse all descriptors.
3671 */
3672 for (j = 0; j < rx_ring->desc_num; j++)
3673 hns3_reuse_buffer(rx_ring, j);
3674
3675 rx_ring->next_to_clean = 0;
3676 rx_ring->next_to_use = 0;
3677 }
3678
3679 hns3_init_tx_ring_tc(priv);
3680
3681 return 0;
3682}
3683
3684static void hns3_store_coal(struct hns3_nic_priv *priv)
3685{
3686 /* ethtool only supports setting and querying one coalesce
3687 * configuration for now, so save vector 0's coalesce
3688 * configuration here in order to restore it.
3689 */
3690 memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
3691 sizeof(struct hns3_enet_coalesce));
3692 memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
3693 sizeof(struct hns3_enet_coalesce));
3694}
3695
3696static void hns3_restore_coal(struct hns3_nic_priv *priv)
3697{
3698 u16 vector_num = priv->vector_num;
3699 int i;
3700
3701 for (i = 0; i < vector_num; i++) {
3702 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
3703 sizeof(struct hns3_enet_coalesce));
3704 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
3705 sizeof(struct hns3_enet_coalesce));
3324 } 3706 }
3325} 3707}
3326 3708
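
The hns3_store_coal()/hns3_restore_coal() pair added above exists because, as the comment notes, ethtool exposes only a single coalesce configuration: vector 0's settings are saved before the TQP vectors are torn down (channel change or reset) and copied onto every newly allocated vector afterwards. A standalone sketch of that save-then-fan-out behaviour, with simplified stand-in types rather than the driver's real structures:

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for struct hns3_enet_coalesce and the per-vector
 * tx/rx groups; field names here are illustrative, not the driver's layout. */
struct coal { unsigned int int_gl; unsigned int gl_adapt_enable; };
struct vector { struct coal tx_coal; struct coal rx_coal; };

int main(void)
{
	struct vector old_vec[4] = { { { 30, 1 }, { 50, 1 } } }; /* vector 0 holds the user's setting */
	struct vector new_vec[8];
	struct coal saved_tx, saved_rx;
	int i;

	/* hns3_store_coal(): remember only vector 0's configuration */
	saved_tx = old_vec[0].tx_coal;
	saved_rx = old_vec[0].rx_coal;

	/* ... vectors are torn down and re-allocated with a new count ... */
	memset(new_vec, 0, sizeof(new_vec));

	/* hns3_restore_coal(): copy the saved configuration to every new vector */
	for (i = 0; i < 8; i++) {
		new_vec[i].tx_coal = saved_tx;
		new_vec[i].rx_coal = saved_rx;
	}

	printf("vector 7 tx int_gl = %u\n", new_vec[7].tx_coal.int_gl);
	return 0;
}
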
@@ -3330,7 +3712,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
3330 struct net_device *ndev = kinfo->netdev; 3712 struct net_device *ndev = kinfo->netdev;
3331 3713
3332 if (!netif_running(ndev)) 3714 if (!netif_running(ndev))
3333 return -EIO; 3715 return 0;
3334 3716
3335 return hns3_nic_net_stop(ndev); 3717 return hns3_nic_net_stop(ndev);
3336} 3718}
@@ -3357,22 +3739,39 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3357{ 3739{
3358 struct net_device *netdev = handle->kinfo.netdev; 3740 struct net_device *netdev = handle->kinfo.netdev;
3359 struct hns3_nic_priv *priv = netdev_priv(netdev); 3741 struct hns3_nic_priv *priv = netdev_priv(netdev);
3742 bool vlan_filter_enable;
3360 int ret; 3743 int ret;
3361 3744
3362 hns3_init_mac_addr(netdev); 3745 ret = hns3_init_mac_addr(netdev, false);
3363 hns3_nic_set_rx_mode(netdev); 3746 if (ret)
3364 hns3_recover_hw_addr(netdev); 3747 return ret;
3748
3749 ret = hns3_recover_hw_addr(netdev);
3750 if (ret)
3751 return ret;
3752
3753 ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
3754 if (ret)
3755 return ret;
3756
3757 vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
3758 hns3_enable_vlan_filter(netdev, vlan_filter_enable);
3365 3759
3366 /* Hardware table is only cleared when the pf resets */ 3760 /* Hardware table is only cleared when the pf resets */
3367 if (!(handle->flags & HNAE3_SUPPORT_VF)) 3761 if (!(handle->flags & HNAE3_SUPPORT_VF)) {
3368 hns3_restore_vlan(netdev); 3762 ret = hns3_restore_vlan(netdev);
3763 if (ret)
3764 return ret;
3765 }
3766
3767 ret = hns3_restore_fd_rules(netdev);
3768 if (ret)
3769 return ret;
3369 3770
3370 /* Carrier off reporting is important to ethtool even BEFORE open */ 3771 /* Carrier off reporting is important to ethtool even BEFORE open */
3371 netif_carrier_off(netdev); 3772 netif_carrier_off(netdev);
3372 3773
3373 ret = hns3_get_ring_config(priv); 3774 hns3_restore_coal(priv);
3374 if (ret)
3375 return ret;
3376 3775
3377 ret = hns3_nic_init_vector_data(priv); 3776 ret = hns3_nic_init_vector_data(priv);
3378 if (ret) 3777 if (ret)
@@ -3389,11 +3788,12 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3389 3788
3390static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) 3789static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3391{ 3790{
3791 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
3392 struct net_device *netdev = handle->kinfo.netdev; 3792 struct net_device *netdev = handle->kinfo.netdev;
3393 struct hns3_nic_priv *priv = netdev_priv(netdev); 3793 struct hns3_nic_priv *priv = netdev_priv(netdev);
3394 int ret; 3794 int ret;
3395 3795
3396 hns3_clear_all_ring(handle); 3796 hns3_force_clear_all_rx_ring(handle);
3397 3797
3398 ret = hns3_nic_uninit_vector_data(priv); 3798 ret = hns3_nic_uninit_vector_data(priv);
3399 if (ret) { 3799 if (ret) {
@@ -3401,13 +3801,20 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3401 return ret; 3801 return ret;
3402 } 3802 }
3403 3803
3804 hns3_store_coal(priv);
3805
3404 ret = hns3_uninit_all_ring(priv); 3806 ret = hns3_uninit_all_ring(priv);
3405 if (ret) 3807 if (ret)
3406 netdev_err(netdev, "uninit ring error\n"); 3808 netdev_err(netdev, "uninit ring error\n");
3407 3809
3408 hns3_put_ring_config(priv); 3810 /* it is cumbersome for hardware to pick-and-choose entries for deletion
3409 3811 * from table space. Hence, for a function reset, software intervention is
3410 priv->ring_data = NULL; 3812 * required to delete the entries.
3813 */
3814 if (hns3_dev_ongoing_func_reset(ae_dev)) {
3815 hns3_remove_hw_addr(netdev);
3816 hns3_del_all_fd_rules(netdev, false);
3817 }
3411 3818
3412 return ret; 3819 return ret;
3413} 3820}
@@ -3437,24 +3844,7 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
3437 return ret; 3844 return ret;
3438} 3845}
3439 3846
3440static void hns3_restore_coal(struct hns3_nic_priv *priv, 3847static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
3441 struct hns3_enet_coalesce *tx,
3442 struct hns3_enet_coalesce *rx)
3443{
3444 u16 vector_num = priv->vector_num;
3445 int i;
3446
3447 for (i = 0; i < vector_num; i++) {
3448 memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
3449 sizeof(struct hns3_enet_coalesce));
3450 memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
3451 sizeof(struct hns3_enet_coalesce));
3452 }
3453}
3454
3455static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
3456 struct hns3_enet_coalesce *tx,
3457 struct hns3_enet_coalesce *rx)
3458{ 3848{
3459 struct hns3_nic_priv *priv = netdev_priv(netdev); 3849 struct hns3_nic_priv *priv = netdev_priv(netdev);
3460 struct hnae3_handle *h = hns3_get_handle(netdev); 3850 struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -3472,7 +3862,7 @@ static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
3472 if (ret) 3862 if (ret)
3473 goto err_alloc_vector; 3863 goto err_alloc_vector;
3474 3864
3475 hns3_restore_coal(priv, tx, rx); 3865 hns3_restore_coal(priv);
3476 3866
3477 ret = hns3_nic_init_vector_data(priv); 3867 ret = hns3_nic_init_vector_data(priv);
3478 if (ret) 3868 if (ret)
@@ -3504,7 +3894,6 @@ int hns3_set_channels(struct net_device *netdev,
3504 struct hns3_nic_priv *priv = netdev_priv(netdev); 3894 struct hns3_nic_priv *priv = netdev_priv(netdev);
3505 struct hnae3_handle *h = hns3_get_handle(netdev); 3895 struct hnae3_handle *h = hns3_get_handle(netdev);
3506 struct hnae3_knic_private_info *kinfo = &h->kinfo; 3896 struct hnae3_knic_private_info *kinfo = &h->kinfo;
3507 struct hns3_enet_coalesce tx_coal, rx_coal;
3508 bool if_running = netif_running(netdev); 3897 bool if_running = netif_running(netdev);
3509 u32 new_tqp_num = ch->combined_count; 3898 u32 new_tqp_num = ch->combined_count;
3510 u16 org_tqp_num; 3899 u16 org_tqp_num;
@@ -3529,8 +3918,6 @@ int hns3_set_channels(struct net_device *netdev,
3529 if (if_running) 3918 if (if_running)
3530 hns3_nic_net_stop(netdev); 3919 hns3_nic_net_stop(netdev);
3531 3920
3532 hns3_clear_all_ring(h);
3533
3534 ret = hns3_nic_uninit_vector_data(priv); 3921 ret = hns3_nic_uninit_vector_data(priv);
3535 if (ret) { 3922 if (ret) {
3536 dev_err(&netdev->dev, 3923 dev_err(&netdev->dev,
@@ -3538,15 +3925,7 @@ int hns3_set_channels(struct net_device *netdev,
3538 goto open_netdev; 3925 goto open_netdev;
3539 } 3926 }
3540 3927
3541 /* Changing the tqp num may also change the vector num, 3928 hns3_store_coal(priv);
3542 * ethtool only support setting and querying one coal
3543 * configuation for now, so save the vector 0' coal
3544 * configuation here in order to restore it.
3545 */
3546 memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
3547 sizeof(struct hns3_enet_coalesce));
3548 memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
3549 sizeof(struct hns3_enet_coalesce));
3550 3929
3551 hns3_nic_dealloc_vector_data(priv); 3930 hns3_nic_dealloc_vector_data(priv);
3552 3931
@@ -3554,10 +3933,9 @@ int hns3_set_channels(struct net_device *netdev,
3554 hns3_put_ring_config(priv); 3933 hns3_put_ring_config(priv);
3555 3934
3556 org_tqp_num = h->kinfo.num_tqps; 3935 org_tqp_num = h->kinfo.num_tqps;
3557 ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal); 3936 ret = hns3_modify_tqp_num(netdev, new_tqp_num);
3558 if (ret) { 3937 if (ret) {
3559 ret = hns3_modify_tqp_num(netdev, org_tqp_num, 3938 ret = hns3_modify_tqp_num(netdev, org_tqp_num);
3560 &tx_coal, &rx_coal);
3561 if (ret) { 3939 if (ret) {
3562 /* If revert to old tqp failed, fatal error occurred */ 3940 /* If revert to old tqp failed, fatal error occurred */
3563 dev_err(&netdev->dev, 3941 dev_err(&netdev->dev,
@@ -3600,6 +3978,8 @@ static int __init hns3_init_module(void)
3600 3978
3601 client.ops = &client_ops; 3979 client.ops = &client_ops;
3602 3980
3981 INIT_LIST_HEAD(&client.node);
3982
3603 ret = hnae3_register_client(&client); 3983 ret = hnae3_register_client(&client);
3604 if (ret) 3984 if (ret)
3605 return ret; 3985 return ret;
@@ -3627,3 +4007,4 @@ MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
3627MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 4007MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3628MODULE_LICENSE("GPL"); 4008MODULE_LICENSE("GPL");
3629MODULE_ALIAS("pci:hns-nic"); 4009MODULE_ALIAS("pci:hns-nic");
4010MODULE_VERSION(HNS3_MOD_VERSION);