author     David S. Miller <davem@davemloft.net>    2017-08-03 18:08:18 -0400
committer  David S. Miller <davem@davemloft.net>    2017-08-03 18:08:18 -0400
commit     f4b2a42073fb9b51366e379b6d79e2d1290fcb75 (patch)
tree       1ae838d0e8244c45dcafe12c959f69cfd2677d2f
parent     5477f7f3eb950a0598bd70e5d9c0bdc11582d3b2 (diff)
parent     15e8e5ffd624702ba2fa0d27566069922561ae95 (diff)
Merge branch 'hns3-ethernet-driver'
Salil Mehta says:

====================
Hisilicon Network Subsystem 3 Ethernet Driver

This patch-set contains the support of the HNS3 (Hisilicon Network
Subsystem 3) Ethernet driver for the hip08 family of SoCs and future
upcoming SoCs.

Hisilicon's new hip08 SoCs have integrated ethernet based on PCI Express,
and hence there was a need of a new driver over the previous HNS driver
which is already part of the Linux mainline. This new driver is NOT
backward compatible with HNS.

This current driver is meant to control the Physical Function, and there
would soon be support of a separate driver for the Virtual Function once
this base PF driver has been accepted. Also, this driver is ongoing
development work and the HNS3 Ethernet driver would be incrementally
enhanced with more new features.

High Level Architecture:

                             [ Ethtool ]
                                 ^
                                 |
            [Ethernet Client]  [ODP/UIO Client] . . . [ RoCE Client ]
                     |                |                      |
                     |         [ HNAE Device ]               |
                     |                |                      |
              ---------------------------------------------
                                 |
                  [ HNAE3 Framework (Register/unregister) ]
                                 |
              ---------------------------------------------
                                 |
                           [ HCLGE Layer ]
                  ________________|_________________
                 |                |                 |
             [ MDIO ]   [ Scheduler/Shaper ]   [ Debugfs* ]
                 |                |                 |
                 |________________|_________________|
                                 |
                     [ IMP command Interface ]
              ---------------------------------------------
                        HIP08  H A R D W A R E

* Current patch-set broadly adds the support of the following PF
  functionality:
  1. Basic Rx and Tx functionality
  2. TSO support
  3. Ethtool support
  4. Debugfs* support -> this patch has been taken off for now.
  5. HNAE framework and hardware compatibility layer
  6. Scheduler and Shaper support in transmit function
  7. MDIO support

Change Log:
V5->V6: Addressed below comments:
 * Andrew Lunn: Comments on MDIO and ethtool link mode
 * Leon Romanovsky: Some comments on HNAE layer tidy-up
 * Internal comments on redundant code removal, fixing error types etc.

V4->V5: Addressed below concerns:
 * Florian Fainelli: Miscellaneous comments on ethtool & enet layer
 * Stephen Hemminger: Comment on netdev stats in ethtool layer
 * Leon Romanovsky: Comments on driver version string, naming & Kconfig
 * Richard Cochran: Redundant function prototype

V3->V4: Addressed below comments:
 * Andrew Lunn: Various comments on MDIO, ethtool, ENET driver etc.
 * Stephen Hemminger: Change access and update to 64 bit statistics
 * Bo You: Some spelling mistakes and checkpatch.pl errors.

V2->V3: Addressed comments:
 * Yuval Mintz: Removal of redundant userprio-to-tc code
 * Stephen Hemminger: Ethtool & interrupt enable
 * Andrew Lunn: On C45/C22 PHY support, HNAE, ethtool
 * Florian Fainelli: C45/C22 and phy_connect/attach
 * Intel kbuild errors

V1->V2: Addressed some comments by kbuild, Yuval Mintz, Andrew Lunn &
 Florian Fainelli in the following patches:
 * Add support of HNS3 Ethernet Driver for hip08 SoC
 * Add MDIO support to HNS3 Ethernet driver for hip08 SoC
 * Add support of debugfs interface to HNS3 driver
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
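For orientation, the minimal sketch below (not part of this patch set) shows how a client such as the ENET or RoCE driver is expected to hook into the HNAE3 framework added in this series. All demo_* names are hypothetical placeholders; only the struct hnae3_client / hnae3_client_ops layout and the register/unregister calls follow hnae3.h and hnae3.c from the patches that follow.

#include <linux/module.h>
#include "hnae3.h"

static int demo_init_instance(struct hnae3_handle *handle)
{
	/* a real client would allocate its netdev/queues here, using
	 * handle->ae_algo->ops to talk to the underlying ae_dev
	 */
	return 0;
}

static void demo_uninit_instance(struct hnae3_handle *handle, bool reset)
{
}

static void demo_link_status_change(struct hnae3_handle *handle, bool state)
{
}

static const struct hnae3_client_ops demo_client_ops = {
	.init_instance		= demo_init_instance,
	.uninit_instance	= demo_uninit_instance,
	.link_status_change	= demo_link_status_change,
};

static struct hnae3_client demo_client = {
	.name = "demo_knic",
	.type = HNAE3_CLIENT_KNIC,
	.ops = &demo_client_ops,
};

static int __init demo_client_init(void)
{
	/* the framework walks all registered ae_devs and calls
	 * ops->init_client_instance() for every matching one
	 */
	return hnae3_register_client(&demo_client);
}
module_init(demo_client_init);

static void __exit demo_client_exit(void)
{
	hnae3_unregister_client(&demo_client);
}
module_exit(demo_client_exit);

MODULE_LICENSE("GPL");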
-rw-r--r--  MAINTAINERS                                                   8
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig                       27
-rw-r--r--  drivers/net/ethernet/hisilicon/Makefile                       1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/Makefile                  7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c                 300
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h                 444
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile          11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c      356
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h      740
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c    4267
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h     519
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c     213
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h      17
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c      1015
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h       106
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c     2848
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h      592
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c   482
18 files changed, 11953 insertions, 0 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index c67618c97c5d..a515da73c7e4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6148,6 +6148,14 @@ S: Maintained
6148F: drivers/net/ethernet/hisilicon/
6149F: Documentation/devicetree/bindings/net/hisilicon*.txt
6150
6151HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
6152M: Yisen Zhuang <yisen.zhuang@huawei.com>
6153M: Salil Mehta <salil.mehta@huawei.com>
6154L: netdev@vger.kernel.org
6155W: http://www.hisilicon.com
6156S: Maintained
6157F: drivers/net/ethernet/hisilicon/hns3/
6158
6159HISILICON ROCE DRIVER
6160M: Lijun Ou <oulijun@huawei.com>
6161M: Wei Hu(Xavier) <xavier.huwei@huawei.com>
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index d11287e11371..91c7bdb9b43c 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -76,4 +76,31 @@ config HNS_ENET
76 This selects the general ethernet driver for HNS. This module make
77 use of any HNS AE driver, such as HNS_DSAF
78
79config HNS3
80 tristate "Hisilicon Network Subsystem Support HNS3 (Framework)"
81 depends on PCI
82 ---help---
83 This selects the framework support for Hisilicon Network Subsystem 3.
84 This layer facilitates clients like ENET, RoCE and user-space ethernet
85 drivers (like ODP) to register with HNAE devices and their associated
86 operations.
87
88config HNS3_HCLGE
89 tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
90 depends on PCI_MSI
91 depends on HNS3
92 ---help---
93 This selects the HNS3_HCLGE network acceleration engine & its hardware
94 compatibility layer. The engine would be used in Hisilicon hip08 family of
95 SoCs and further upcoming SoCs.
96
97config HNS3_ENET
98 tristate "Hisilicon HNS3 Ethernet Device Support"
99 depends on 64BIT && PCI
100 depends on HNS3 && HNS3_HCLGE
101 ---help---
102 This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
103 family of SoCs. This module depends upon the HNAE3 driver to access the HNAE3
104 devices and their associated operations.
105
106endif # NET_VENDOR_HISILICON
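For reference, a plausible configuration fragment to build the three new options above as modules (assuming PCI, PCI_MSI, 64BIT and the Hisilicon vendor menu are already enabled); the module names follow the hns3/ Makefiles added later in this series:

CONFIG_NET_VENDOR_HISILICON=y
# hnae3.ko - HNAE3 framework
CONFIG_HNS3=m
# hclge.ko - PF acceleration engine / hardware compatibility layer
CONFIG_HNS3_HCLGE=m
# hns3.ko - ENET client driver
CONFIG_HNS3_ENET=m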
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 8661695024dc..3828c435c18f 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
6obj-$(CONFIG_HIP04_ETH) += hip04_eth.o
7obj-$(CONFIG_HNS_MDIO) += hns_mdio.o
8obj-$(CONFIG_HNS) += hns/
9obj-$(CONFIG_HNS3) += hns3/
10obj-$(CONFIG_HISI_FEMAC) += hisi_femac.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
new file mode 100644
index 000000000000..a9349e1f3e51
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the HISILICON network device drivers.
3#
4
5obj-$(CONFIG_HNS3) += hns3pf/
6
7obj-$(CONFIG_HNS3) += hnae3.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
new file mode 100644
index 000000000000..59efbd605416
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -0,0 +1,300 @@
1/*
2 * Copyright (c) 2016-2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/list.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13
14#include "hnae3.h"
15
16static LIST_HEAD(hnae3_ae_algo_list);
17static LIST_HEAD(hnae3_client_list);
18static LIST_HEAD(hnae3_ae_dev_list);
19
20/* We are keeping things simple and using a single lock for all the
21 * lists. This is non-critical code, so other updates, if they happen
22 * in parallel, can wait.
23 */
24static DEFINE_MUTEX(hnae3_common_lock);
25
26static bool hnae3_client_match(enum hnae3_client_type client_type,
27 enum hnae3_dev_type dev_type)
28{
29 if ((dev_type == HNAE3_DEV_KNIC) && (client_type == HNAE3_CLIENT_KNIC ||
30 client_type == HNAE3_CLIENT_ROCE))
31 return true;
32
33 if (dev_type == HNAE3_DEV_UNIC && client_type == HNAE3_CLIENT_UNIC)
34 return true;
35
36 return false;
37}
38
39static int hnae3_match_n_instantiate(struct hnae3_client *client,
40 struct hnae3_ae_dev *ae_dev,
41 bool is_reg, bool *matched)
42{
43 int ret;
44
45 *matched = false;
46
47 /* check if this client matches the type of ae_dev */
48 if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
49 hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
50 return 0;
51 }
52 /* there is a match of client and dev */
53 *matched = true;
54
55 /* now, (un-)instantiate client by calling lower layer */
56 if (is_reg) {
57 ret = ae_dev->ops->init_client_instance(client, ae_dev);
58 if (ret)
59 dev_err(&ae_dev->pdev->dev,
60 "fail to instantiate client\n");
61 return ret;
62 }
63
64 ae_dev->ops->uninit_client_instance(client, ae_dev);
65 return 0;
66}
67
68int hnae3_register_client(struct hnae3_client *client)
69{
70 struct hnae3_client *client_tmp;
71 struct hnae3_ae_dev *ae_dev;
72 bool matched;
73 int ret = 0;
74
75 mutex_lock(&hnae3_common_lock);
76 /* one system should only have one client for every type */
77 list_for_each_entry(client_tmp, &hnae3_client_list, node) {
78 if (client_tmp->type == client->type)
79 goto exit;
80 }
81
82 list_add_tail(&client->node, &hnae3_client_list);
83
84 /* initialize the client on every matched port */
85 list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
86 /* if the client could not be initialized on the current port, for
87 * any error reason, move on to the next available port
88 */
89 ret = hnae3_match_n_instantiate(client, ae_dev, true, &matched);
90 if (ret)
91 dev_err(&ae_dev->pdev->dev,
92 "match and instantiation failed for port\n");
93 }
94
95exit:
96 mutex_unlock(&hnae3_common_lock);
97
98 return ret;
99}
100EXPORT_SYMBOL(hnae3_register_client);
101
102void hnae3_unregister_client(struct hnae3_client *client)
103{
104 struct hnae3_ae_dev *ae_dev;
105 bool matched;
106
107 mutex_lock(&hnae3_common_lock);
108 /* un-initialize the client on every matched port */
109 list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
110 hnae3_match_n_instantiate(client, ae_dev, false, &matched);
111 }
112
113 list_del(&client->node);
114 mutex_unlock(&hnae3_common_lock);
115}
116EXPORT_SYMBOL(hnae3_unregister_client);
117
118/* hnae3_register_ae_algo - register an AE algorithm with the hnae3 framework
119 * @ae_algo: AE algorithm
120 * NOTE: the duplicated name will not be checked
121 */
122int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
123{
124 const struct pci_device_id *id;
125 struct hnae3_ae_dev *ae_dev;
126 struct hnae3_client *client;
127 bool matched;
128 int ret = 0;
129
130 mutex_lock(&hnae3_common_lock);
131
132 list_add_tail(&ae_algo->node, &hnae3_ae_algo_list);
133
134 /* Check if this algo/ops matches the list of ae_devs */
135 list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
136 id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
137 if (!id)
138 continue;
139
140 /* ae_dev init should set flag */
141 ae_dev->ops = ae_algo->ops;
142 ret = ae_algo->ops->init_ae_dev(ae_dev);
143 if (ret) {
144 dev_err(&ae_dev->pdev->dev, "init ae_dev error.\n");
145 continue;
146 }
147
148 hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
149
150 /* check the client list for a match with this ae_dev type and
151 * initialize the matching client instance
152 */
153 list_for_each_entry(client, &hnae3_client_list, node) {
154 ret = hnae3_match_n_instantiate(client, ae_dev, true,
155 &matched);
156 if (ret)
157 dev_err(&ae_dev->pdev->dev,
158 "match and instantiation failed\n");
159 if (matched)
160 break;
161 }
162 }
163
164 mutex_unlock(&hnae3_common_lock);
165
166 return ret;
167}
168EXPORT_SYMBOL(hnae3_register_ae_algo);
169
170/* hnae3_unregister_ae_algo - unregisters an AE algorithm
171 * @ae_algo: the AE algorithm to unregister
172 */
173void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
174{
175 const struct pci_device_id *id;
176 struct hnae3_ae_dev *ae_dev;
177 struct hnae3_client *client;
178 bool matched;
179
180 mutex_lock(&hnae3_common_lock);
181 /* Check if there are matched ae_dev */
182 list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
183 id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
184 if (!id)
185 continue;
186
187 /* check the client list for a match with this ae_dev type and
188 * un-initialize the matching client instance
189 */
190 list_for_each_entry(client, &hnae3_client_list, node) {
191 hnae3_match_n_instantiate(client, ae_dev, false,
192 &matched);
193 if (matched)
194 break;
195 }
196
197 ae_algo->ops->uninit_ae_dev(ae_dev);
198 hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
199 }
200
201 list_del(&ae_algo->node);
202 mutex_unlock(&hnae3_common_lock);
203}
204EXPORT_SYMBOL(hnae3_unregister_ae_algo);
205
206/* hnae3_register_ae_dev - registers an AE device with the hnae3 framework
207 * @ae_dev: the AE device
208 * NOTE: the duplicated name will not be checked
209 */
210int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
211{
212 const struct pci_device_id *id;
213 struct hnae3_ae_algo *ae_algo;
214 struct hnae3_client *client;
215 bool matched;
216 int ret = 0;
217
218 mutex_lock(&hnae3_common_lock);
219 list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
220
221 /* Check if there are matched ae_algo */
222 list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
223 id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
224 if (!id)
225 continue;
226
227 ae_dev->ops = ae_algo->ops;
228
229 if (!ae_dev->ops) {
230 dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
231 goto out_err;
232 }
233
234 /* ae_dev init should set flag */
235 ret = ae_dev->ops->init_ae_dev(ae_dev);
236 if (ret) {
237 dev_err(&ae_dev->pdev->dev, "init ae_dev error\n");
238 goto out_err;
239 }
240
241 hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
242 break;
243 }
244
245 /* check the client list for a match with this ae_dev type and
246 * initialize the matching client instance
247 */
248 list_for_each_entry(client, &hnae3_client_list, node) {
249 ret = hnae3_match_n_instantiate(client, ae_dev, true,
250 &matched);
251 if (ret)
252 dev_err(&ae_dev->pdev->dev,
253 "match and instantiation failed\n");
254 if (matched)
255 break;
256 }
257
258out_err:
259 mutex_unlock(&hnae3_common_lock);
260
261 return ret;
262}
263EXPORT_SYMBOL(hnae3_register_ae_dev);
264
265/* hnae3_unregister_ae_dev - unregisters an AE device
266 * @ae_dev: the AE device to unregister
267 */
268void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
269{
270 const struct pci_device_id *id;
271 struct hnae3_ae_algo *ae_algo;
272 struct hnae3_client *client;
273 bool matched;
274
275 mutex_lock(&hnae3_common_lock);
276 /* Check if there are matched ae_algo */
277 list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
278 id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
279 if (!id)
280 continue;
281
282 list_for_each_entry(client, &hnae3_client_list, node) {
283 hnae3_match_n_instantiate(client, ae_dev, false,
284 &matched);
285 if (matched)
286 break;
287 }
288
289 ae_algo->ops->uninit_ae_dev(ae_dev);
290 hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
291 }
292
293 list_del(&ae_dev->node);
294 mutex_unlock(&hnae3_common_lock);
295}
296EXPORT_SYMBOL(hnae3_unregister_ae_dev);
297
298MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
299MODULE_LICENSE("GPL");
300MODULE_DESCRIPTION("HNAE3(Hisilicon Network Acceleration Engine) Framework");
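To complement the registration paths above, here is a hedged sketch of the other side of the framework: an acceleration-engine backend (HCLGE in this series) publishing its ops and PCI ID table through hnae3_register_ae_algo(). The demo_* identifiers and the single-entry ID table are illustrative assumptions; only the structures and the registration call mirror the code above.

#include <linux/module.h>
#include <linux/pci.h>
#include "hnae3.h"

/* demo_* stubs stand in for a real backend such as hclge */
static int demo_init_ae_dev(struct hnae3_ae_dev *ae_dev) { return 0; }
static void demo_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) { }
static int demo_init_client_instance(struct hnae3_client *client,
				     struct hnae3_ae_dev *ae_dev) { return 0; }
static void demo_uninit_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev) { }

/* illustrative single-entry table; a real backend would list the
 * HNAE3_DEV_ID_* values from hnae3.h that it supports
 */
static const struct pci_device_id demo_ae_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNAE3_DEV_ID_GE), 0 },
	{ /* sentinel */ }
};

static const struct hnae3_ae_ops demo_ae_ops = {
	.init_ae_dev		= demo_init_ae_dev,
	.uninit_ae_dev		= demo_uninit_ae_dev,
	.init_client_instance	= demo_init_client_instance,
	.uninit_client_instance	= demo_uninit_client_instance,
};

static struct hnae3_ae_algo demo_ae_algo = {
	.ops = &demo_ae_ops,
	.name = "demo_ae",
	.pdev_id_table = demo_ae_pci_tbl,
};

static int __init demo_ae_init(void)
{
	/* for every registered ae_dev whose PCI ID matches pdev_id_table,
	 * the framework calls init_ae_dev() and then instantiates any
	 * matching clients
	 */
	return hnae3_register_ae_algo(&demo_ae_algo);
}
module_init(demo_ae_init);

static void __exit demo_ae_exit(void)
{
	hnae3_unregister_ae_algo(&demo_ae_algo);
}
module_exit(demo_ae_exit);

MODULE_LICENSE("GPL");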
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
new file mode 100644
index 000000000000..b2f28ae81273
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -0,0 +1,444 @@
1/*
2 * Copyright (c) 2016-2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef __HNAE3_H
11#define __HNAE3_H
12
13/* Names used in this framework:
14 * ae handle (handle):
15 * a set of queues provided by AE
16 * ring buffer queue (rbq):
17 * the channel between upper layer and the AE, can do tx and rx
18 * ring:
19 * a tx or rx channel within a rbq
20 * ring description (desc):
21 * an element in the ring with packet information
22 * buffer:
23 * a memory region referred by desc with the full packet payload
24 *
25 * "num" means a static number set as a parameter, "count" means a dynamic
26 * number set while running
27 * "cb" means control block
28 */
29
30#include <linux/acpi.h>
31#include <linux/delay.h>
32#include <linux/device.h>
33#include <linux/module.h>
34#include <linux/netdevice.h>
35#include <linux/pci.h>
36#include <linux/types.h>
37
38/* Device IDs */
39#define HNAE3_DEV_ID_GE 0xA220
40#define HNAE3_DEV_ID_25GE 0xA221
41#define HNAE3_DEV_ID_25GE_RDMA 0xA222
42#define HNAE3_DEV_ID_25GE_RDMA_MACSEC 0xA223
43#define HNAE3_DEV_ID_50GE_RDMA 0xA224
44#define HNAE3_DEV_ID_50GE_RDMA_MACSEC 0xA225
45#define HNAE3_DEV_ID_100G_RDMA_MACSEC 0xA226
46#define HNAE3_DEV_ID_100G_VF 0xA22E
47#define HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF 0xA22F
48
49#define HNAE3_CLASS_NAME_SIZE 16
50
51#define HNAE3_DEV_INITED_B 0x0
52#define HNAE_DEV_SUPPORT_ROCE_B 0x1
53
54#define ring_ptr_move_fw(ring, p) \
55 ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
56#define ring_ptr_move_bw(ring, p) \
57 ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
58
59enum hns_desc_type {
60 DESC_TYPE_SKB,
61 DESC_TYPE_PAGE,
62};
63
64struct hnae3_handle;
65
66struct hnae3_queue {
67 void __iomem *io_base;
68 struct hnae3_ae_algo *ae_algo;
69 struct hnae3_handle *handle;
70 int tqp_index; /* index in a handle */
71 u32 buf_size; /* size for hnae_desc->addr, preset by AE */
72 u16 desc_num; /* total number of desc */
73};
74
75/* hnae3 loop mode */
76enum hnae3_loop {
77 HNAE3_MAC_INTER_LOOP_MAC,
78 HNAE3_MAC_INTER_LOOP_SERDES,
79 HNAE3_MAC_INTER_LOOP_PHY,
80 HNAE3_MAC_LOOP_NONE,
81};
82
83enum hnae3_client_type {
84 HNAE3_CLIENT_KNIC,
85 HNAE3_CLIENT_UNIC,
86 HNAE3_CLIENT_ROCE,
87};
88
89enum hnae3_dev_type {
90 HNAE3_DEV_KNIC,
91 HNAE3_DEV_UNIC,
92};
93
94/* mac media type */
95enum hnae3_media_type {
96 HNAE3_MEDIA_TYPE_UNKNOWN,
97 HNAE3_MEDIA_TYPE_FIBER,
98 HNAE3_MEDIA_TYPE_COPPER,
99 HNAE3_MEDIA_TYPE_BACKPLANE,
100};
101
102struct hnae3_vector_info {
103 u8 __iomem *io_addr;
104 int vector;
105};
106
107#define HNAE3_RING_TYPE_B 0
108#define HNAE3_RING_TYPE_TX 0
109#define HNAE3_RING_TYPE_RX 1
110
111struct hnae3_ring_chain_node {
112 struct hnae3_ring_chain_node *next;
113 u32 tqp_index;
114 u32 flag;
115};
116
117#define HNAE3_IS_TX_RING(node) \
118 (((node)->flag & (1 << HNAE3_RING_TYPE_B)) == HNAE3_RING_TYPE_TX)
119
120struct hnae3_client_ops {
121 int (*init_instance)(struct hnae3_handle *handle);
122 void (*uninit_instance)(struct hnae3_handle *handle, bool reset);
123 void (*link_status_change)(struct hnae3_handle *handle, bool state);
124};
125
126#define HNAE3_CLIENT_NAME_LENGTH 16
127struct hnae3_client {
128 char name[HNAE3_CLIENT_NAME_LENGTH];
129 u16 version;
130 unsigned long state;
131 enum hnae3_client_type type;
132 const struct hnae3_client_ops *ops;
133 struct list_head node;
134};
135
136struct hnae3_ae_dev {
137 struct pci_dev *pdev;
138 const struct hnae3_ae_ops *ops;
139 struct list_head node;
140 u32 flag;
141 enum hnae3_dev_type dev_type;
142 void *priv;
143};
144
145/* This struct defines the operation on the handle.
146 *
147 * init_ae_dev(): (mandatory)
148 * Get PF configure from pci_dev and initialize PF hardware
149 * uninit_ae_dev()
150 * Disable PF device and release PF resource
151 * register_client
152 * Register client to ae_dev
153 * unregister_client()
154 * Unregister client from ae_dev
155 * start()
156 * Enable the hardware
157 * stop()
158 * Disable the hardware
159 * get_status()
160 * Get the carrier state of the back channel of the handle, 1 for ok, 0 for
161 * non-ok
162 * get_ksettings_an_result()
163 * Get negotiation status, speed and duplex
164 * update_speed_duplex_h()
165 * Update hardware speed and duplex
166 * get_media_type()
167 * Get media type of MAC
168 * adjust_link()
169 * Adjust link status
170 * set_loopback()
171 * Set loopback
172 * set_promisc_mode
173 * Set promisc mode
174 * set_mtu()
175 * set mtu
176 * get_pauseparam()
177 * get tx and rx pause frame usage
178 * set_pauseparam()
179 * set tx and rx pause frame usage
180 * set_autoneg()
181 * set autonegotiation of pause frame use
182 * get_autoneg()
183 * get autonegotiation of pause frame use
184 * get_coalesce_usecs()
185 * get usecs to delay a TX interrupt after a packet is sent
186 * get_rx_max_coalesced_frames()
187 * get Maximum number of packets to be sent before a TX interrupt.
188 * set_coalesce_usecs()
189 * set usecs to delay a TX interrupt after a packet is sent
190 * set_coalesce_frames()
191 * set Maximum number of packets to be sent before a TX interrupt.
192 * get_mac_addr()
193 * get mac address
194 * set_mac_addr()
195 * set mac address
196 * add_uc_addr
197 * Add unicast addr to mac table
198 * rm_uc_addr
199 * Remove unicast addr from mac table
200 * set_mc_addr()
201 * Set multicast address
202 * add_mc_addr
203 * Add multicast address to mac table
204 * rm_mc_addr
205 * Remove multicast address from mac table
206 * update_stats()
207 * Update Old network device statistics
208 * get_ethtool_stats()
209 * Get ethtool network device statistics
210 * get_strings()
211 * Get a set of strings that describe the requested objects
212 * get_sset_count()
213 * Get number of strings that @get_strings will write
214 * update_led_status()
215 * Update the led status
216 * set_led_id()
217 * Set led id
218 * get_regs()
219 * Get regs dump
220 * get_regs_len()
221 * Get the len of the regs dump
222 * get_rss_key_size()
223 * Get rss key size
224 * get_rss_indir_size()
225 * Get rss indirection table size
226 * get_rss()
227 * Get rss table
228 * set_rss()
229 * Set rss table
230 * get_tc_size()
231 * Get tc size of handle
232 * get_vector()
233 * Get vector number and vector information
234 * map_ring_to_vector()
235 * Map rings to vector
236 * unmap_ring_from_vector()
237 * Unmap rings from vector
238 * add_tunnel_udp()
239 * Add tunnel information to hardware
240 * del_tunnel_udp()
241 * Delete tunnel information from hardware
242 * reset_queue()
243 * Reset queue
244 * get_fw_version()
245 * Get firmware version
246 * get_mdix_mode()
247 * Get media type of PHY
248 * set_vlan_filter()
249 * Set vlan filter config of Ports
250 * set_vf_vlan_filter()
251 * Set vlan filter config of vf
252 */
253struct hnae3_ae_ops {
254 int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
255 void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev);
256
257 int (*init_client_instance)(struct hnae3_client *client,
258 struct hnae3_ae_dev *ae_dev);
259 void (*uninit_client_instance)(struct hnae3_client *client,
260 struct hnae3_ae_dev *ae_dev);
261 int (*start)(struct hnae3_handle *handle);
262 void (*stop)(struct hnae3_handle *handle);
263 int (*get_status)(struct hnae3_handle *handle);
264 void (*get_ksettings_an_result)(struct hnae3_handle *handle,
265 u8 *auto_neg, u32 *speed, u8 *duplex);
266
267 int (*update_speed_duplex_h)(struct hnae3_handle *handle);
268 int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed,
269 u8 duplex);
270
271 void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type);
272 void (*adjust_link)(struct hnae3_handle *handle, int speed, int duplex);
273 int (*set_loopback)(struct hnae3_handle *handle,
274 enum hnae3_loop loop_mode, bool en);
275
276 void (*set_promisc_mode)(struct hnae3_handle *handle, u32 en);
277 int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);
278
279 void (*get_pauseparam)(struct hnae3_handle *handle,
280 u32 *auto_neg, u32 *rx_en, u32 *tx_en);
281 int (*set_pauseparam)(struct hnae3_handle *handle,
282 u32 auto_neg, u32 rx_en, u32 tx_en);
283
284 int (*set_autoneg)(struct hnae3_handle *handle, bool enable);
285 int (*get_autoneg)(struct hnae3_handle *handle);
286
287 void (*get_coalesce_usecs)(struct hnae3_handle *handle,
288 u32 *tx_usecs, u32 *rx_usecs);
289 void (*get_rx_max_coalesced_frames)(struct hnae3_handle *handle,
290 u32 *tx_frames, u32 *rx_frames);
291 int (*set_coalesce_usecs)(struct hnae3_handle *handle, u32 timeout);
292 int (*set_coalesce_frames)(struct hnae3_handle *handle,
293 u32 coalesce_frames);
294 void (*get_coalesce_range)(struct hnae3_handle *handle,
295 u32 *tx_frames_low, u32 *rx_frames_low,
296 u32 *tx_frames_high, u32 *rx_frames_high,
297 u32 *tx_usecs_low, u32 *rx_usecs_low,
298 u32 *tx_usecs_high, u32 *rx_usecs_high);
299
300 void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p);
301 int (*set_mac_addr)(struct hnae3_handle *handle, void *p);
302 int (*add_uc_addr)(struct hnae3_handle *handle,
303 const unsigned char *addr);
304 int (*rm_uc_addr)(struct hnae3_handle *handle,
305 const unsigned char *addr);
306 int (*set_mc_addr)(struct hnae3_handle *handle, void *addr);
307 int (*add_mc_addr)(struct hnae3_handle *handle,
308 const unsigned char *addr);
309 int (*rm_mc_addr)(struct hnae3_handle *handle,
310 const unsigned char *addr);
311
312 void (*set_tso_stats)(struct hnae3_handle *handle, int enable);
313 void (*update_stats)(struct hnae3_handle *handle,
314 struct net_device_stats *net_stats);
315 void (*get_stats)(struct hnae3_handle *handle, u64 *data);
316
317 void (*get_strings)(struct hnae3_handle *handle,
318 u32 stringset, u8 *data);
319 int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
320
321 void (*get_regs)(struct hnae3_handle *handle, void *data);
322 int (*get_regs_len)(struct hnae3_handle *handle);
323
324 u32 (*get_rss_key_size)(struct hnae3_handle *handle);
325 u32 (*get_rss_indir_size)(struct hnae3_handle *handle);
326 int (*get_rss)(struct hnae3_handle *handle, u32 *indir, u8 *key,
327 u8 *hfunc);
328 int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
329 const u8 *key, const u8 hfunc);
330
331 int (*get_tc_size)(struct hnae3_handle *handle);
332
333 int (*get_vector)(struct hnae3_handle *handle, u16 vector_num,
334 struct hnae3_vector_info *vector_info);
335 int (*map_ring_to_vector)(struct hnae3_handle *handle,
336 int vector_num,
337 struct hnae3_ring_chain_node *vr_chain);
338 int (*unmap_ring_from_vector)(struct hnae3_handle *handle,
339 int vector_num,
340 struct hnae3_ring_chain_node *vr_chain);
341
342 int (*add_tunnel_udp)(struct hnae3_handle *handle, u16 port_num);
343 int (*del_tunnel_udp)(struct hnae3_handle *handle, u16 port_num);
344
345 void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
346 u32 (*get_fw_version)(struct hnae3_handle *handle);
347 void (*get_mdix_mode)(struct hnae3_handle *handle,
348 u8 *tp_mdix_ctrl, u8 *tp_mdix);
349
350 int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
351 u16 vlan_id, bool is_kill);
352 int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
353 u16 vlan, u8 qos, __be16 proto);
354};
355
356struct hnae3_ae_algo {
357 const struct hnae3_ae_ops *ops;
358 struct list_head node;
359 char name[HNAE3_CLASS_NAME_SIZE];
360 const struct pci_device_id *pdev_id_table;
361};
362
363#define HNAE3_INT_NAME_LEN (IFNAMSIZ + 16)
364#define HNAE3_ITR_COUNTDOWN_START 100
365
366struct hnae3_tc_info {
367 u16 tqp_offset; /* TQP offset from base TQP */
368 u16 tqp_count; /* Total TQPs */
369 u8 up; /* user priority */
370 u8 tc; /* TC index */
371 bool enable; /* If this TC is enabled or not */
372};
373
374#define HNAE3_MAX_TC 8
375struct hnae3_knic_private_info {
376 struct net_device *netdev; /* Set by KNIC client when init instance */
377 u16 rss_size; /* Allocated RSS queues */
378 u16 rx_buf_len;
379 u16 num_desc;
380
381 u8 num_tc; /* Total number of enabled TCs */
382 struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */
383
384 u16 num_tqps; /* total number of TQPs in this handle */
385 struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
386};
387
388struct hnae3_roce_private_info {
389 struct net_device *netdev;
390 void __iomem *roce_io_base;
391 int base_vector;
392 int num_vectors;
393};
394
395struct hnae3_unic_private_info {
396 struct net_device *netdev;
397 u16 rx_buf_len;
398 u16 num_desc;
399 u16 num_tqps; /* total number of tqps in this handle */
400 struct hnae3_queue **tqp; /* array base of all TQPs of this instance */
401};
402
403#define HNAE3_SUPPORT_MAC_LOOPBACK 1
404#define HNAE3_SUPPORT_PHY_LOOPBACK 2
405#define HNAE3_SUPPORT_SERDES_LOOPBACK 4
406
407struct hnae3_handle {
408 struct hnae3_client *client;
409 struct pci_dev *pdev;
410 void *priv;
411 struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
412 u64 flags; /* Indicate the capabilities for this handle*/
413
414 union {
415 struct net_device *netdev; /* first member */
416 struct hnae3_knic_private_info kinfo;
417 struct hnae3_unic_private_info uinfo;
418 struct hnae3_roce_private_info rinfo;
419 };
420
421 u32 numa_node_mask; /* for multi-chip support */
422};
423
424#define hnae_set_field(origin, mask, shift, val) \
425 do { \
426 (origin) &= (~(mask)); \
427 (origin) |= ((val) << (shift)) & (mask); \
428 } while (0)
429#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
430
431#define hnae_set_bit(origin, shift, val) \
432 hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
433#define hnae_get_bit(origin, shift) \
434 hnae_get_field((origin), (0x1 << (shift)), (shift))
435
436int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
437void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
438
439void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
440int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
441
442void hnae3_unregister_client(struct hnae3_client *client);
443int hnae3_register_client(struct hnae3_client *client);
444#endif
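A short illustration of the hnae_set_field()/hnae_get_bit() helpers defined at the end of this header, mirroring how the framework flips HNAE3_DEV_INITED_B on an ae_dev. The demo_flag_helpers() wrapper and the 0x7 << 4 field are made up for the example; only the macro usage follows the definitions above.

#include "hnae3.h"

static void demo_flag_helpers(struct hnae3_ae_dev *ae_dev)
{
	u32 val = 0;

	/* mark the device initialised, as hnae3_register_ae_algo() does */
	hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);

	if (hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
		dev_info(&ae_dev->pdev->dev, "ae_dev marked initialised\n");

	/* generic field form: write 0x5 into bits [6:4] of val */
	hnae_set_field(val, 0x7 << 4, 4, 0x5);
	WARN_ON(hnae_get_field(val, 0x7 << 4, 4) != 0x5);

	/* clear the flag again on teardown */
	hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
}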
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
new file mode 100644
index 000000000000..162e8a42acd0
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -0,0 +1,11 @@
1#
2# Makefile for the HISILICON network device drivers.
3#
4
5ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
6
7obj-$(CONFIG_HNS3_HCLGE) += hclge.o
8hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o
9
10obj-$(CONFIG_HNS3_ENET) += hns3.o
11hns3-objs = hns3_enet.o hns3_ethtool.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
new file mode 100644
index 000000000000..bc869842728f
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -0,0 +1,356 @@
1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/dma-mapping.h>
11#include <linux/slab.h>
12#include <linux/pci.h>
13#include <linux/device.h>
14#include <linux/err.h>
15#include <linux/dma-direction.h>
16#include "hclge_cmd.h"
17#include "hnae3.h"
18#include "hclge_main.h"
19
20#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
21#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \
22 DMA_TO_DEVICE : DMA_FROM_DEVICE)
23#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
24
25static int hclge_ring_space(struct hclge_cmq_ring *ring)
26{
27 int ntu = ring->next_to_use;
28 int ntc = ring->next_to_clean;
29 int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
30
31 return ring->desc_num - used - 1;
32}
33
34static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
35{
36 int size = ring->desc_num * sizeof(struct hclge_desc);
37
38 ring->desc = kzalloc(size, GFP_KERNEL);
39 if (!ring->desc)
40 return -ENOMEM;
41
42 ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
43 size, DMA_BIDIRECTIONAL);
44 if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
45 ring->desc_dma_addr = 0;
46 kfree(ring->desc);
47 ring->desc = NULL;
48 return -ENOMEM;
49 }
50
51 return 0;
52}
53
54static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
55{
56 dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
57 ring->desc_num * sizeof(ring->desc[0]),
58 DMA_BIDIRECTIONAL);
59
60 ring->desc_dma_addr = 0;
61 kfree(ring->desc);
62 ring->desc = NULL;
63}
64
65static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type)
66{
67 struct hclge_hw *hw = &hdev->hw;
68 struct hclge_cmq_ring *ring =
69 (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
70 int ret;
71
72 ring->flag = ring_type;
73 ring->dev = hdev;
74
75 ret = hclge_alloc_cmd_desc(ring);
76 if (ret) {
77 dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
78 (ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
79 return ret;
80 }
81
82 ring->next_to_clean = 0;
83 ring->next_to_use = 0;
84
85 return 0;
86}
87
88void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
89 enum hclge_opcode_type opcode, bool is_read)
90{
91 memset((void *)desc, 0, sizeof(struct hclge_desc));
92 desc->opcode = cpu_to_le16(opcode);
93 desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
94
95 if (is_read)
96 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
97 else
98 desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
99}
100
101static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
102{
103 dma_addr_t dma = ring->desc_dma_addr;
104 struct hclge_dev *hdev = ring->dev;
105 struct hclge_hw *hw = &hdev->hw;
106
107 if (ring->flag == HCLGE_TYPE_CSQ) {
108 hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
109 (u32)dma);
110 hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
111 (u32)((dma >> 31) >> 1));
112 hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
113 (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
114 HCLGE_NIC_CMQ_ENABLE);
115 hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
116 hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
117 } else {
118 hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
119 (u32)dma);
120 hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
121 (u32)((dma >> 31) >> 1));
122 hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
123 (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
124 HCLGE_NIC_CMQ_ENABLE);
125 hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
126 hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
127 }
128}
129
130static void hclge_cmd_init_regs(struct hclge_hw *hw)
131{
132 hclge_cmd_config_regs(&hw->cmq.csq);
133 hclge_cmd_config_regs(&hw->cmq.crq);
134}
135
136static int hclge_cmd_csq_clean(struct hclge_hw *hw)
137{
138 struct hclge_cmq_ring *csq = &hw->cmq.csq;
139 u16 ntc = csq->next_to_clean;
140 struct hclge_desc *desc;
141 int clean = 0;
142 u32 head;
143
144 desc = &csq->desc[ntc];
145 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
146
147 while (head != ntc) {
148 memset(desc, 0, sizeof(*desc));
149 ntc++;
150 if (ntc == csq->desc_num)
151 ntc = 0;
152 desc = &csq->desc[ntc];
153 clean++;
154 }
155 csq->next_to_clean = ntc;
156
157 return clean;
158}
159
160static int hclge_cmd_csq_done(struct hclge_hw *hw)
161{
162 u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
163 return head == hw->cmq.csq.next_to_use;
164}
165
166static bool hclge_is_special_opcode(u16 opcode)
167{
168 u16 spec_opcode[3] = {0x0030, 0x0031, 0x0032};
169 int i;
170
171 for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
172 if (spec_opcode[i] == opcode)
173 return true;
174 }
175
176 return false;
177}
178
179/**
180 * hclge_cmd_send - send command to command queue
181 * @hw: pointer to the hw struct
182 * @desc: prefilled descriptor for describing the command
183 * @num : the number of descriptors to be sent
184 *
185 * This is the main send command for the command queue; it
186 * posts the descriptors to the queue, cleans the queue, etc
187 **/
188int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
189{
190 struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
191 struct hclge_desc *desc_to_use;
192 bool complete = false;
193 u32 timeout = 0;
194 int handle = 0;
195 int retval = 0;
196 u16 opcode, desc_ret;
197 int ntc;
198
199 spin_lock_bh(&hw->cmq.csq.lock);
200
201 if (num > hclge_ring_space(&hw->cmq.csq)) {
202 spin_unlock_bh(&hw->cmq.csq.lock);
203 return -EBUSY;
204 }
205
206 /**
207 * Record the location of desc in the ring for this time
208 * which will be used by the hardware to write back
209 */
210 ntc = hw->cmq.csq.next_to_use;
211 opcode = desc[0].opcode;
212 while (handle < num) {
213 desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
214 *desc_to_use = desc[handle];
215 (hw->cmq.csq.next_to_use)++;
216 if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
217 hw->cmq.csq.next_to_use = 0;
218 handle++;
219 }
220
221 /* Write to hardware */
222 hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);
223
224 /**
225 * If the command is sync, wait for the firmware to write back,
226 * if multiple descriptors are to be sent, use the first one to check
227 */
228 if (HCLGE_SEND_SYNC(desc->flag)) {
229 do {
230 if (hclge_cmd_csq_done(hw))
231 break;
232 udelay(1);
233 timeout++;
234 } while (timeout < hw->cmq.tx_timeout);
235 }
236
237 if (hclge_cmd_csq_done(hw)) {
238 complete = true;
239 handle = 0;
240 while (handle < num) {
241 /* Get the result of hardware write back */
242 desc_to_use = &hw->cmq.csq.desc[ntc];
243 desc[handle] = *desc_to_use;
244 pr_debug("Get cmd desc:\n");
245
246 if (likely(!hclge_is_special_opcode(opcode)))
247 desc_ret = desc[handle].retval;
248 else
249 desc_ret = desc[0].retval;
250
251 if ((enum hclge_cmd_return_status)desc_ret ==
252 HCLGE_CMD_EXEC_SUCCESS)
253 retval = 0;
254 else
255 retval = -EIO;
256 hw->cmq.last_status = (enum hclge_cmd_status)desc_ret;
257 ntc++;
258 handle++;
259 if (ntc == hw->cmq.csq.desc_num)
260 ntc = 0;
261 }
262 }
263
264 if (!complete)
265 retval = -EAGAIN;
266
267 /* Clean the command send queue */
268 handle = hclge_cmd_csq_clean(hw);
269 if (handle != num) {
270 dev_warn(&hdev->pdev->dev,
271 "cleaned %d, need to clean %d\n", handle, num);
272 }
273
274 spin_unlock_bh(&hw->cmq.csq.lock);
275
276 return retval;
277}
278
279enum hclge_cmd_status hclge_cmd_query_firmware_version(struct hclge_hw *hw,
280 u32 *version)
281{
282 struct hclge_query_version *resp;
283 struct hclge_desc desc;
284 int ret;
285
286 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
287 resp = (struct hclge_query_version *)desc.data;
288
289 ret = hclge_cmd_send(hw, &desc, 1);
290 if (!ret)
291 *version = le32_to_cpu(resp->firmware);
292
293 return ret;
294}
295
296int hclge_cmd_init(struct hclge_dev *hdev)
297{
298 u32 version;
299 int ret;
300
301 /* Setup the queue entries for the command queue */
302 hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
303 hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
304
305 /* Setup the lock for command queue */
306 spin_lock_init(&hdev->hw.cmq.csq.lock);
307 spin_lock_init(&hdev->hw.cmq.crq.lock);
308
309 /* Setup Tx write back timeout */
310 hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;
311
312 /* Setup queue rings */
313 ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CSQ);
314 if (ret) {
315 dev_err(&hdev->pdev->dev,
316 "CSQ ring setup error %d\n", ret);
317 return ret;
318 }
319
320 ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CRQ);
321 if (ret) {
322 dev_err(&hdev->pdev->dev,
323 "CRQ ring setup error %d\n", ret);
324 goto err_csq;
325 }
326
327 hclge_cmd_init_regs(&hdev->hw);
328
329 ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
330 if (ret) {
331 dev_err(&hdev->pdev->dev,
332 "firmware version query failed %d\n", ret);
333 return ret;
334 }
335 hdev->fw_version = version;
336
337 dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
338
339 return 0;
340err_csq:
341 hclge_free_cmd_desc(&hdev->hw.cmq.csq);
342 return ret;
343}
344
345static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
346{
347 spin_lock_bh(&ring->lock);
348 hclge_free_cmd_desc(ring);
349 spin_unlock_bh(&ring->lock);
350}
351
352void hclge_destroy_cmd_queue(struct hclge_hw *hw)
353{
354 hclge_destroy_queue(&hw->cmq.csq);
355 hclge_destroy_queue(&hw->cmq.crq);
356}
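As a usage sketch for the command queue above (not part of the patch): build one descriptor with hclge_cmd_setup_basic_desc(), submit it with hclge_cmd_send(), and read the reply back out of desc.data, just as hclge_cmd_query_firmware_version() does. The link-status query is chosen only because its opcode and reply layout appear in hclge_cmd.h below; demo_query_link_status() itself is hypothetical.

#include "hclge_cmd.h"
#include "hclge_main.h"

static int demo_query_link_status(struct hclge_hw *hw, bool *link_up)
{
	struct hclge_link_status *resp;
	struct hclge_desc desc;
	int ret;

	/* one descriptor, marked as a read request */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);

	ret = hclge_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	/* firmware writes its reply back into the same descriptor */
	resp = (struct hclge_link_status *)desc.data;
	*link_up = !!(resp->status & HCLGE_LINK_STATUS);

	return 0;
}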
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
new file mode 100644
index 000000000000..91ae0135ee50
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -0,0 +1,740 @@
1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef __HCLGE_CMD_H
11#define __HCLGE_CMD_H
12#include <linux/types.h>
13#include <linux/io.h>
14
15#define HCLGE_CMDQ_TX_TIMEOUT 1000
16
17struct hclge_dev;
18struct hclge_desc {
19 __le16 opcode;
20
21#define HCLGE_CMDQ_RX_INVLD_B 0
22#define HCLGE_CMDQ_RX_OUTVLD_B 1
23
24 __le16 flag;
25 __le16 retval;
26 __le16 rsv;
27 __le32 data[6];
28};
29
30struct hclge_desc_cb {
31 dma_addr_t dma;
32 void *va;
33 u32 length;
34};
35
36struct hclge_cmq_ring {
37 dma_addr_t desc_dma_addr;
38 struct hclge_desc *desc;
39 struct hclge_desc_cb *desc_cb;
40 struct hclge_dev *dev;
41 u32 head;
42 u32 tail;
43
44 u16 buf_size;
45 u16 desc_num;
46 int next_to_use;
47 int next_to_clean;
48 u8 flag;
49 spinlock_t lock; /* Command queue lock */
50};
51
52enum hclge_cmd_return_status {
53 HCLGE_CMD_EXEC_SUCCESS = 0,
54 HCLGE_CMD_NO_AUTH = 1,
55 HCLGE_CMD_NOT_EXEC = 2,
56 HCLGE_CMD_QUEUE_FULL = 3,
57};
58
59enum hclge_cmd_status {
60 HCLGE_STATUS_SUCCESS = 0,
61 HCLGE_ERR_CSQ_FULL = -1,
62 HCLGE_ERR_CSQ_TIMEOUT = -2,
63 HCLGE_ERR_CSQ_ERROR = -3,
64};
65
66struct hclge_cmq {
67 struct hclge_cmq_ring csq;
68 struct hclge_cmq_ring crq;
69 u16 tx_timeout; /* Tx timeout */
70 enum hclge_cmd_status last_status;
71};
72
73#define HCLGE_CMD_FLAG_IN_VALID_SHIFT 0
74#define HCLGE_CMD_FLAG_OUT_VALID_SHIFT 1
75#define HCLGE_CMD_FLAG_NEXT_SHIFT 2
76#define HCLGE_CMD_FLAG_WR_OR_RD_SHIFT 3
77#define HCLGE_CMD_FLAG_NO_INTR_SHIFT 4
78#define HCLGE_CMD_FLAG_ERR_INTR_SHIFT 5
79
80#define HCLGE_CMD_FLAG_IN BIT(HCLGE_CMD_FLAG_IN_VALID_SHIFT)
81#define HCLGE_CMD_FLAG_OUT BIT(HCLGE_CMD_FLAG_OUT_VALID_SHIFT)
82#define HCLGE_CMD_FLAG_NEXT BIT(HCLGE_CMD_FLAG_NEXT_SHIFT)
83#define HCLGE_CMD_FLAG_WR BIT(HCLGE_CMD_FLAG_WR_OR_RD_SHIFT)
84#define HCLGE_CMD_FLAG_NO_INTR BIT(HCLGE_CMD_FLAG_NO_INTR_SHIFT)
85#define HCLGE_CMD_FLAG_ERR_INTR BIT(HCLGE_CMD_FLAG_ERR_INTR_SHIFT)
86
87enum hclge_opcode_type {
88 /* Generic command */
89 HCLGE_OPC_QUERY_FW_VER = 0x0001,
90 HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
91 HCLGE_OPC_GBL_RST_STATUS = 0x0021,
92 HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022,
93 HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
94 HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
95 HCLGE_OPC_GET_CFG_PARAM = 0x0025,
96
97 HCLGE_OPC_STATS_64_BIT = 0x0030,
98 HCLGE_OPC_STATS_32_BIT = 0x0031,
99 HCLGE_OPC_STATS_MAC = 0x0032,
100 /* Device management command */
101
102 /* MAC command */
103 HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
104 HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
105 HCLGE_OPC_QUERY_AN_RESULT = 0x0306,
106 HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
107 HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
108 HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
109 /* MACSEC command */
110
111 /* PFC/Pause CMD*/
112 HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
113 HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
114 HCLGE_OPC_CFG_MAC_PARA = 0x0703,
115 HCLGE_OPC_CFG_PFC_PARA = 0x0704,
116 HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
117 HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
118 HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
119 HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
120 HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
121 HCLGE_OPC_QOS_MAP = 0x070A,
122
123 /* ETS/scheduler commands */
124 HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
125 HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
126 HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
127 HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
128 HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
129 HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
130 HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
131 HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
132 HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
133 HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
134 HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
135 HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
136 HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
137 HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
138 HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
139 HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
140 HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
141
142 /* Packet buffer allocate command */
143 HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
144 HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
145 HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
146 HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
147 HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
148 HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
149
150 /* PTP command */
151 /* TQP management command */
152 HCLGE_OPC_SET_TQP_MAP = 0x0A01,
153
154 /* TQP command */
155 HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
156 HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
157 HCLGE_OPC_QUERY_TX_STATUS = 0x0B03,
158 HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
159 HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
160 HCLGE_OPC_QUERY_RX_STATUS = 0x0B13,
161 HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
162 HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
163 HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
164 HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
165
166 /* TSO cmd */
167 HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
168
169 /* RSS cmd */
170 HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
171 HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
172 HCLGE_OPC_RSS_TC_MODE = 0x0D08,
173 HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
174
175 /* Promiscuous mode command */
176 HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
177
178 /* Interrupts cmd */
179 HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
180 HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
181
182 /* MAC command */
183 HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
184 HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
185 HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
186 HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
187 HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
188 HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
189
190 /* Multicast linear table cmd */
191 HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
192 HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021,
193 HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022,
194 HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023,
195
196 /* VLAN command */
197 HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
198 HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
199 HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
200
201 /* MDIO command */
202 HCLGE_OPC_MDIO_CONFIG = 0x1900,
203
204 /* QCN command */
205 HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
206 HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
207 HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03,
208 HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
209 HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
210 HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
211 HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
212 HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
213
214 /* Mailbox cmd */
215 HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
216};
217
218#define HCLGE_TQP_REG_OFFSET 0x80000
219#define HCLGE_TQP_REG_SIZE 0x200
220
221#define HCLGE_RCB_INIT_QUERY_TIMEOUT 10
222#define HCLGE_RCB_INIT_FLAG_EN_B 0
223#define HCLGE_RCB_INIT_FLAG_FINI_B 8
224struct hclge_config_rcb_init {
225 __le16 rcb_init_flag;
226 u8 rsv[22];
227};
228
229struct hclge_tqp_map {
230 __le16 tqp_id; /* Absolute tqp id in this pf */
231 u8 tqp_vf; /* VF id */
232#define HCLGE_TQP_MAP_TYPE_PF 0
233#define HCLGE_TQP_MAP_TYPE_VF 1
234#define HCLGE_TQP_MAP_TYPE_B 0
235#define HCLGE_TQP_MAP_EN_B 1
236 u8 tqp_flag; /* Indicate it's pf or vf tqp */
237 __le16 tqp_vid; /* Virtual id in this pf/vf */
238 u8 rsv[18];
239};
240
241#define HCLGE_VECTOR_ELEMENTS_PER_CMD 11
242
243enum hclge_int_type {
244 HCLGE_INT_TX,
245 HCLGE_INT_RX,
246 HCLGE_INT_EVENT,
247};
248
249struct hclge_ctrl_vector_chain {
250 u8 int_vector_id;
251 u8 int_cause_num;
252#define HCLGE_INT_TYPE_S 0
253#define HCLGE_INT_TYPE_M 0x3
254#define HCLGE_TQP_ID_S 2
255#define HCLGE_TQP_ID_M (0x3fff << HCLGE_TQP_ID_S)
256 __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
257};
258
259#define HCLGE_TC_NUM 8
260#define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicates enable or not */
261#define HCLGE_BUF_UNIT_S 7 /* Buf size is in units of 128 bytes */
262struct hclge_tx_buff_alloc {
263 __le16 tx_pkt_buff[HCLGE_TC_NUM];
264 u8 tx_buff_rsv[8];
265};
266
267struct hclge_rx_priv_buff {
268 __le16 buf_num[HCLGE_TC_NUM];
269 u8 rsv[8];
270};
271
272struct hclge_query_version {
273 __le32 firmware;
274 __le32 firmware_rsv[5];
275};
276
277#define HCLGE_RX_PRIV_EN_B 15
278#define HCLGE_TC_NUM_ONE_DESC 4
279struct hclge_priv_wl {
280 __le16 high;
281 __le16 low;
282};
283
284struct hclge_rx_priv_wl_buf {
285 struct hclge_priv_wl tc_wl[HCLGE_TC_NUM_ONE_DESC];
286};
287
288struct hclge_rx_com_thrd {
289 struct hclge_priv_wl com_thrd[HCLGE_TC_NUM_ONE_DESC];
290};
291
292struct hclge_rx_com_wl {
293 struct hclge_priv_wl com_wl;
294};
295
296struct hclge_waterline {
297 u32 low;
298 u32 high;
299};
300
301struct hclge_tc_thrd {
302 u32 low;
303 u32 high;
304};
305
306struct hclge_priv_buf {
307 struct hclge_waterline wl; /* Waterline for low and high */
308 u32 buf_size; /* TC private buffer size */
309 u32 enable; /* Enable TC private buffer or not */
310};
311
312#define HCLGE_MAX_TC_NUM 8
313struct hclge_shared_buf {
314 struct hclge_waterline self;
315 struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM];
316 u32 buf_size;
317};
318
319#define HCLGE_RX_COM_WL_EN_B 15
320struct hclge_rx_com_wl_buf {
321 __le16 high_wl;
322 __le16 low_wl;
323 u8 rsv[20];
324};
325
326#define HCLGE_RX_PKT_EN_B 15
327struct hclge_rx_pkt_buf {
328 __le16 high_pkt;
329 __le16 low_pkt;
330 u8 rsv[20];
331};
332
333#define HCLGE_PF_STATE_DONE_B 0
334#define HCLGE_PF_STATE_MAIN_B 1
335#define HCLGE_PF_STATE_BOND_B 2
336#define HCLGE_PF_STATE_MAC_N_B 6
337#define HCLGE_PF_MAC_NUM_MASK 0x3
338#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B)
339#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B)
340struct hclge_func_status {
341 __le32 vf_rst_state[4];
342 u8 pf_state;
343 u8 mac_id;
344 u8 rsv1;
345 u8 pf_cnt_in_mac;
346 u8 pf_num;
347 u8 vf_num;
348 u8 rsv[2];
349};
350
351struct hclge_pf_res {
352 __le16 tqp_num;
353 __le16 buf_size;
354 __le16 msixcap_localid_ba_nic;
355 __le16 msixcap_localid_ba_rocee;
356#define HCLGE_PF_VEC_NUM_S 0
357#define HCLGE_PF_VEC_NUM_M (0xff << HCLGE_PF_VEC_NUM_S)
358 __le16 pf_intr_vector_number;
359 __le16 pf_own_fun_number;
360 __le32 rsv[3];
361};
362
363#define HCLGE_CFG_OFFSET_S 0
364#define HCLGE_CFG_OFFSET_M 0xfffff /* Byte (8-10.3) */
365#define HCLGE_CFG_RD_LEN_S 24
366#define HCLGE_CFG_RD_LEN_M (0xf << HCLGE_CFG_RD_LEN_S)
367#define HCLGE_CFG_RD_LEN_BYTES 16
368#define HCLGE_CFG_RD_LEN_UNIT 4
369
370#define HCLGE_CFG_VMDQ_S 0
371#define HCLGE_CFG_VMDQ_M (0xff << HCLGE_CFG_VMDQ_S)
372#define HCLGE_CFG_TC_NUM_S 8
373#define HCLGE_CFG_TC_NUM_M (0xff << HCLGE_CFG_TC_NUM_S)
374#define HCLGE_CFG_TQP_DESC_N_S 16
375#define HCLGE_CFG_TQP_DESC_N_M (0xffff << HCLGE_CFG_TQP_DESC_N_S)
376#define HCLGE_CFG_PHY_ADDR_S 0
377#define HCLGE_CFG_PHY_ADDR_M (0x1f << HCLGE_CFG_PHY_ADDR_S)
378#define HCLGE_CFG_MEDIA_TP_S 8
379#define HCLGE_CFG_MEDIA_TP_M (0xff << HCLGE_CFG_MEDIA_TP_S)
380#define HCLGE_CFG_RX_BUF_LEN_S 16
381#define HCLGE_CFG_RX_BUF_LEN_M (0xffff << HCLGE_CFG_RX_BUF_LEN_S)
382#define HCLGE_CFG_MAC_ADDR_H_S 0
383#define HCLGE_CFG_MAC_ADDR_H_M (0xffff << HCLGE_CFG_MAC_ADDR_H_S)
384#define HCLGE_CFG_DEFAULT_SPEED_S 16
385#define HCLGE_CFG_DEFAULT_SPEED_M (0xff << HCLGE_CFG_DEFAULT_SPEED_S)
386
387struct hclge_cfg_param {
388 __le32 offset;
389 __le32 rsv;
390 __le32 param[4];
391};
392
393#define HCLGE_MAC_MODE 0x0
394#define HCLGE_DESC_NUM 0x40
395
396#define HCLGE_ALLOC_VALID_B 0
397struct hclge_vf_num {
398 u8 alloc_valid;
399 u8 rsv[23];
400};
401
402#define HCLGE_RSS_DEFAULT_OUTPORT_B 4
403#define HCLGE_RSS_HASH_KEY_OFFSET_B 4
404#define HCLGE_RSS_HASH_KEY_NUM 16
405struct hclge_rss_config {
406 u8 hash_config;
407 u8 rsv[7];
408 u8 hash_key[HCLGE_RSS_HASH_KEY_NUM];
409};
410
411struct hclge_rss_input_tuple {
412 u8 ipv4_tcp_en;
413 u8 ipv4_udp_en;
414 u8 ipv4_sctp_en;
415 u8 ipv4_fragment_en;
416 u8 ipv6_tcp_en;
417 u8 ipv6_udp_en;
418 u8 ipv6_sctp_en;
419 u8 ipv6_fragment_en;
420 u8 rsv[16];
421};
422
423#define HCLGE_RSS_CFG_TBL_SIZE 16
424
425struct hclge_rss_indirection_table {
426 u16 start_table_index;
427 u16 rss_set_bitmap;
428 u8 rsv[4];
429 u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE];
430};
431
432#define HCLGE_RSS_TC_OFFSET_S 0
433#define HCLGE_RSS_TC_OFFSET_M (0x3ff << HCLGE_RSS_TC_OFFSET_S)
434#define HCLGE_RSS_TC_SIZE_S 12
435#define HCLGE_RSS_TC_SIZE_M (0x7 << HCLGE_RSS_TC_SIZE_S)
436#define HCLGE_RSS_TC_VALID_B 15
437struct hclge_rss_tc_mode {
438 u16 rss_tc_mode[HCLGE_MAX_TC_NUM];
439 u8 rsv[8];
440};
441
442#define HCLGE_LINK_STS_B 0
443#define HCLGE_LINK_STATUS BIT(HCLGE_LINK_STS_B)
444struct hclge_link_status {
445 u8 status;
446 u8 rsv[23];
447};
448
449struct hclge_promisc_param {
450 u8 vf_id;
451 u8 enable;
452};
453
454#define HCLGE_PROMISC_EN_B 1
455#define HCLGE_PROMISC_EN_ALL 0x7
456#define HCLGE_PROMISC_EN_UC 0x1
457#define HCLGE_PROMISC_EN_MC 0x2
458#define HCLGE_PROMISC_EN_BC 0x4
459struct hclge_promisc_cfg {
460 u8 flag;
461 u8 vf_id;
462 __le16 rsv0;
463 u8 rsv1[20];
464};
465
466enum hclge_promisc_type {
467 HCLGE_UNICAST = 1,
468 HCLGE_MULTICAST = 2,
469 HCLGE_BROADCAST = 3,
470};
471
472#define HCLGE_MAC_TX_EN_B 6
473#define HCLGE_MAC_RX_EN_B 7
474#define HCLGE_MAC_PAD_TX_B 11
475#define HCLGE_MAC_PAD_RX_B 12
476#define HCLGE_MAC_1588_TX_B 13
477#define HCLGE_MAC_1588_RX_B 14
478#define HCLGE_MAC_APP_LP_B 15
479#define HCLGE_MAC_LINE_LP_B 16
480#define HCLGE_MAC_FCS_TX_B 17
481#define HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B 18
482#define HCLGE_MAC_RX_FCS_STRIP_B 19
483#define HCLGE_MAC_RX_FCS_B 20
484#define HCLGE_MAC_TX_UNDER_MIN_ERR_B 21
485#define HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B 22
486
487struct hclge_config_mac_mode {
488 __le32 txrx_pad_fcs_loop_en;
489 u8 rsv[20];
490};
491
492#define HCLGE_CFG_SPEED_S 0
493#define HCLGE_CFG_SPEED_M (0x3f << HCLGE_CFG_SPEED_S)
494
495#define HCLGE_CFG_DUPLEX_B 7
496#define HCLGE_CFG_DUPLEX_M BIT(HCLGE_CFG_DUPLEX_B)
497
498struct hclge_config_mac_speed_dup {
499 u8 speed_dup;
500
501#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0
502 u8 mac_change_fec_en;
503 u8 rsv[22];
504};
505
506#define HCLGE_QUERY_SPEED_S 3
507#define HCLGE_QUERY_AN_B 0
508#define HCLGE_QUERY_DUPLEX_B 2
509
510#define HCLGE_QUERY_SPEED_M (0x1f << HCLGE_QUERY_SPEED_S)
511#define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B)
512#define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B)
513
514struct hclge_query_an_speed_dup {
515 u8 an_syn_dup_speed;
516 u8 pause;
517 u8 rsv[23];
518};
519
520#define HCLGE_RING_ID_MASK 0x3ff
521#define HCLGE_TQP_ENABLE_B 0
522
523#define HCLGE_MAC_CFG_AN_EN_B 0
524#define HCLGE_MAC_CFG_AN_INT_EN_B 1
525#define HCLGE_MAC_CFG_AN_INT_MSK_B 2
526#define HCLGE_MAC_CFG_AN_INT_CLR_B 3
527#define HCLGE_MAC_CFG_AN_RST_B 4
528
529#define HCLGE_MAC_CFG_AN_EN BIT(HCLGE_MAC_CFG_AN_EN_B)
530
531struct hclge_config_auto_neg {
532 __le32 cfg_an_cmd_flag;
533 u8 rsv[20];
534};
535
536#define HCLGE_MAC_MIN_MTU 64
537#define HCLGE_MAC_MAX_MTU 9728
538#define HCLGE_MAC_UPLINK_PORT 0x100
539
540struct hclge_config_max_frm_size {
541 __le16 max_frm_size;
542 u8 rsv[22];
543};
544
545enum hclge_mac_vlan_tbl_opcode {
546 HCLGE_MAC_VLAN_ADD, /* Add new or modify mac_vlan */
547 HCLGE_MAC_VLAN_UPDATE, /* Modify other fields of this table */
548 HCLGE_MAC_VLAN_REMOVE, /* Remove an entry through mac_vlan key */
549 HCLGE_MAC_VLAN_LKUP, /* Look up an entry through mac_vlan key */
550};
551
552#define HCLGE_MAC_VLAN_BIT0_EN_B 0x0
553#define HCLGE_MAC_VLAN_BIT1_EN_B 0x1
554#define HCLGE_MAC_EPORT_SW_EN_B 0xc
555#define HCLGE_MAC_EPORT_TYPE_B 0xb
556#define HCLGE_MAC_EPORT_VFID_S 0x3
557#define HCLGE_MAC_EPORT_VFID_M (0xff << HCLGE_MAC_EPORT_VFID_S)
558#define HCLGE_MAC_EPORT_PFID_S 0x0
559#define HCLGE_MAC_EPORT_PFID_M (0x7 << HCLGE_MAC_EPORT_PFID_S)
560struct hclge_mac_vlan_tbl_entry {
561 u8 flags;
562 u8 resp_code;
563 __le16 vlan_tag;
564 __le32 mac_addr_hi32;
565 __le16 mac_addr_lo16;
566 __le16 rsv1;
567 u8 entry_type;
568 u8 mc_mac_en;
569 __le16 egress_port;
570 __le16 egress_queue;
571 u8 rsv2[6];
572};
573
574#define HCLGE_CFG_MTA_MAC_SEL_S 0x0
575#define HCLGE_CFG_MTA_MAC_SEL_M (0x3 << HCLGE_CFG_MTA_MAC_SEL_S)
576#define HCLGE_CFG_MTA_MAC_EN_B 0x7
577struct hclge_mta_filter_mode {
578 u8 dmac_sel_en; /* Use lowest 2 bits as sel_mode, bit 7 as enable */
579 u8 rsv[23];
580};
581
582#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0x0
583struct hclge_cfg_func_mta_filter {
584 u8 accept; /* Only the lowest bit is used */
585 u8 function_id;
586 u8 rsv[22];
587};
588
589#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0x0
590#define HCLGE_CFG_MTA_ITEM_IDX_S 0x0
591#define HCLGE_CFG_MTA_ITEM_IDX_M (0xfff << HCLGE_CFG_MTA_ITEM_IDX_S)
592struct hclge_cfg_func_mta_item {
593 u16 item_idx; /* Only the lowest 12 bits are used */
594 u8 accept; /* Only the lowest bit is used */
595 u8 rsv[21];
596};
597
598struct hclge_mac_vlan_add {
599 __le16 flags;
600 __le16 mac_addr_hi16;
601 __le32 mac_addr_lo32;
602 __le32 mac_addr_msk_hi32;
603 __le16 mac_addr_msk_lo16;
604 __le16 vlan_tag;
605 __le16 ingress_port;
606 __le16 egress_port;
607 u8 rsv[4];
608};
609
610#define HNS3_MAC_VLAN_CFG_FLAG_BIT 0
611struct hclge_mac_vlan_remove {
612 __le16 flags;
613 __le16 mac_addr_hi16;
614 __le32 mac_addr_lo32;
615 __le32 mac_addr_msk_hi32;
616 __le16 mac_addr_msk_lo16;
617 __le16 vlan_tag;
618 __le16 ingress_port;
619 __le16 egress_port;
620 u8 rsv[4];
621};
622
623struct hclge_vlan_filter_ctrl {
624 u8 vlan_type;
625 u8 vlan_fe;
626 u8 rsv[22];
627};
628
629struct hclge_vlan_filter_pf_cfg {
630 u8 vlan_offset;
631 u8 vlan_cfg;
632 u8 rsv[2];
633 u8 vlan_offset_bitmap[20];
634};
635
636struct hclge_vlan_filter_vf_cfg {
637 u16 vlan_id;
638 u8 resp_code;
639 u8 rsv;
640 u8 vlan_cfg;
641 u8 rsv1[3];
642 u8 vf_bitmap[16];
643};
644
645struct hclge_cfg_com_tqp_queue {
646 __le16 tqp_id;
647 __le16 stream_id;
648 u8 enable;
649 u8 rsv[19];
650};
651
652struct hclge_cfg_tx_queue_pointer {
653 __le16 tqp_id;
654 __le16 tx_tail;
655 __le16 tx_head;
656 __le16 fbd_num;
657 __le16 ring_offset;
658 u8 rsv[14];
659};
660
661#define HCLGE_TSO_MSS_MIN_S 0
662#define HCLGE_TSO_MSS_MIN_M (0x3FFF << HCLGE_TSO_MSS_MIN_S)
663
664#define HCLGE_TSO_MSS_MAX_S 16
665#define HCLGE_TSO_MSS_MAX_M (0x3FFF << HCLGE_TSO_MSS_MAX_S)
666
667struct hclge_cfg_tso_status {
668 __le16 tso_mss_min;
669 __le16 tso_mss_max;
670 u8 rsv[20];
671};
672
673#define HCLGE_TSO_MSS_MIN 256
674#define HCLGE_TSO_MSS_MAX 9668
675
676#define HCLGE_TQP_RESET_B 0
677struct hclge_reset_tqp_queue {
678 __le16 tqp_id;
679 u8 reset_req;
680 u8 ready_to_reset;
681 u8 rsv[20];
682};
683
684#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
685#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
686#define HCLGE_DEFAULT_DV 0xA000 /* 40k bytes */
687
688#define HCLGE_TYPE_CRQ 0
689#define HCLGE_TYPE_CSQ 1
690#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000
691#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004
692#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008
693#define HCLGE_NIC_CSQ_TAIL_REG 0x27010
694#define HCLGE_NIC_CSQ_HEAD_REG 0x27014
695#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018
696#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701c
697#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
698#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
699#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
700#define HCLGE_NIC_CMQ_EN_B 16
701#define HCLGE_NIC_CMQ_ENABLE BIT(HCLGE_NIC_CMQ_EN_B)
702#define HCLGE_NIC_CMQ_DESC_NUM 1024
703#define HCLGE_NIC_CMQ_DESC_NUM_S 3
704
705int hclge_cmd_init(struct hclge_dev *hdev);
706static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
707{
708 writel(value, base + reg);
709}
710
711#define hclge_write_dev(a, reg, value) \
712 hclge_write_reg((a)->io_base, (reg), (value))
713#define hclge_read_dev(a, reg) \
714 hclge_read_reg((a)->io_base, (reg))
715
716static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg)
717{
718 u8 __iomem *reg_addr = READ_ONCE(base);
719
720 return readl(reg_addr + reg);
721}
722
723#define HCLGE_SEND_SYNC(flag) \
724 ((flag) & HCLGE_CMD_FLAG_NO_INTR)
725
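For illustration only (editorial sketch, not part of the patch): the accessors above can be combined with the CMQ register offsets to program the command send queue (CSQ). The snippet below assumes struct hclge_hw exposes the io_base member implied by hclge_write_dev(), and assumes the depth register takes the descriptor count shifted by HCLGE_NIC_CMQ_DESC_NUM_S with the enable bit set; the real initialization lives elsewhere in the series.

static inline void example_csq_setup(struct hclge_hw *hw, dma_addr_t base,
				     u32 desc_num)
{
	/* Hypothetical helper: program base address, depth, and reset the
	 * head/tail pointers of the CSQ using the macros defined above.
	 */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, lower_32_bits(base));
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, upper_32_bits(base));
	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
			(desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
			HCLGE_NIC_CMQ_ENABLE);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
}
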
726struct hclge_hw;
727int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num);
728void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
729 enum hclge_opcode_type opcode, bool is_read);
730
731int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
732 struct hclge_promisc_param *param);
733
734enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
735 struct hclge_desc *desc);
736enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
737 struct hclge_desc *desc);
738
739void hclge_destroy_cmd_queue(struct hclge_hw *hw);
740#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
new file mode 100644
index 000000000000..3611991689bc
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -0,0 +1,4267 @@
1/*
2 * Copyright (c) 2016-2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/acpi.h>
11#include <linux/device.h>
12#include <linux/etherdevice.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/netdevice.h>
18#include <linux/pci.h>
19#include <linux/platform_device.h>
20
21#include "hclge_cmd.h"
22#include "hclge_main.h"
23#include "hclge_mdio.h"
24#include "hclge_tm.h"
25#include "hnae3.h"
26
27#define HCLGE_NAME "hclge"
28#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
29#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
31#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
32
33static int hclge_rss_init_hw(struct hclge_dev *hdev);
34static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
35 enum hclge_mta_dmac_sel_type mta_mac_sel,
36 bool enable);
37static int hclge_init_vlan_config(struct hclge_dev *hdev);
38
39static struct hnae3_ae_algo ae_algo;
40
41static const struct pci_device_id ae_algo_pci_tbl[] = {
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49 /* Required last entry */
50 {0, }
51};
52
53static const struct pci_device_id roce_pci_tbl[] = {
54 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
55 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
56 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
57 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
58 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
59 /* Required last entry */
60 {0, }
61};
62
63static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
64 "Mac Loopback test",
65 "Serdes Loopback test",
66 "Phy Loopback test"
67};
68
69static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
70 {"igu_rx_oversize_pkt",
71 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
72 {"igu_rx_undersize_pkt",
73 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
74 {"igu_rx_out_all_pkt",
75 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
76 {"igu_rx_uni_pkt",
77 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
78 {"igu_rx_multi_pkt",
79 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
80 {"igu_rx_broad_pkt",
81 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
82 {"egu_tx_out_all_pkt",
83 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
84 {"egu_tx_uni_pkt",
85 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
86 {"egu_tx_multi_pkt",
87 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
88 {"egu_tx_broad_pkt",
89 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
90 {"ssu_ppp_mac_key_num",
91 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
92 {"ssu_ppp_host_key_num",
93 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
94 {"ppp_ssu_mac_rlt_num",
95 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
96 {"ppp_ssu_host_rlt_num",
97 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
98 {"ssu_tx_in_num",
99 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
100 {"ssu_tx_out_num",
101 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
102 {"ssu_rx_in_num",
103 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
104 {"ssu_rx_out_num",
105 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
106};
107
108static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
109 {"igu_rx_err_pkt",
110 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
111 {"igu_rx_no_eof_pkt",
112 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
113 {"igu_rx_no_sof_pkt",
114 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
115 {"egu_tx_1588_pkt",
116 HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
117 {"ssu_full_drop_num",
118 HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
119 {"ssu_part_drop_num",
120 HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
121 {"ppp_key_drop_num",
122 HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
123 {"ppp_rlt_drop_num",
124 HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
125 {"ssu_key_drop_num",
126 HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
127 {"pkt_curr_buf_cnt",
128 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
129 {"qcn_fb_rcv_cnt",
130 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
131 {"qcn_fb_drop_cnt",
132 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
133 {"qcn_fb_invaild_cnt",
134 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
135 {"rx_packet_tc0_in_cnt",
136 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
137 {"rx_packet_tc1_in_cnt",
138 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
139 {"rx_packet_tc2_in_cnt",
140 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
141 {"rx_packet_tc3_in_cnt",
142 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
143 {"rx_packet_tc4_in_cnt",
144 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
145 {"rx_packet_tc5_in_cnt",
146 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
147 {"rx_packet_tc6_in_cnt",
148 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
149 {"rx_packet_tc7_in_cnt",
150 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
151 {"rx_packet_tc0_out_cnt",
152 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
153 {"rx_packet_tc1_out_cnt",
154 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
155 {"rx_packet_tc2_out_cnt",
156 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
157 {"rx_packet_tc3_out_cnt",
158 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
159 {"rx_packet_tc4_out_cnt",
160 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
161 {"rx_packet_tc5_out_cnt",
162 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
163 {"rx_packet_tc6_out_cnt",
164 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
165 {"rx_packet_tc7_out_cnt",
166 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
167 {"tx_packet_tc0_in_cnt",
168 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
169 {"tx_packet_tc1_in_cnt",
170 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
171 {"tx_packet_tc2_in_cnt",
172 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
173 {"tx_packet_tc3_in_cnt",
174 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
175 {"tx_packet_tc4_in_cnt",
176 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
177 {"tx_packet_tc5_in_cnt",
178 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
179 {"tx_packet_tc6_in_cnt",
180 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
181 {"tx_packet_tc7_in_cnt",
182 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
183 {"tx_packet_tc0_out_cnt",
184 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
185 {"tx_packet_tc1_out_cnt",
186 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
187 {"tx_packet_tc2_out_cnt",
188 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
189 {"tx_packet_tc3_out_cnt",
190 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
191 {"tx_packet_tc4_out_cnt",
192 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
193 {"tx_packet_tc5_out_cnt",
194 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
195 {"tx_packet_tc6_out_cnt",
196 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
197 {"tx_packet_tc7_out_cnt",
198 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
199 {"pkt_curr_buf_tc0_cnt",
200 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
201 {"pkt_curr_buf_tc1_cnt",
202 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
203 {"pkt_curr_buf_tc2_cnt",
204 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
205 {"pkt_curr_buf_tc3_cnt",
206 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
207 {"pkt_curr_buf_tc4_cnt",
208 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
209 {"pkt_curr_buf_tc5_cnt",
210 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
211 {"pkt_curr_buf_tc6_cnt",
212 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
213 {"pkt_curr_buf_tc7_cnt",
214 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
215 {"mb_uncopy_num",
216 HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
217 {"lo_pri_unicast_rlt_drop_num",
218 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
219 {"hi_pri_multicast_rlt_drop_num",
220 HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
221 {"lo_pri_multicast_rlt_drop_num",
222 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
223 {"rx_oq_drop_pkt_cnt",
224 HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
225 {"tx_oq_drop_pkt_cnt",
226 HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
227 {"nic_l2_err_drop_pkt_cnt",
228 HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
229 {"roc_l2_err_drop_pkt_cnt",
230 HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
231};
232
233static const struct hclge_comm_stats_str g_mac_stats_string[] = {
234 {"mac_tx_mac_pause_num",
235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
236 {"mac_rx_mac_pause_num",
237 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
238 {"mac_tx_pfc_pri0_pkt_num",
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
240 {"mac_tx_pfc_pri1_pkt_num",
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
242 {"mac_tx_pfc_pri2_pkt_num",
243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
244 {"mac_tx_pfc_pri3_pkt_num",
245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
246 {"mac_tx_pfc_pri4_pkt_num",
247 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
248 {"mac_tx_pfc_pri5_pkt_num",
249 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
250 {"mac_tx_pfc_pri6_pkt_num",
251 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
252 {"mac_tx_pfc_pri7_pkt_num",
253 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
254 {"mac_rx_pfc_pri0_pkt_num",
255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
256 {"mac_rx_pfc_pri1_pkt_num",
257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
258 {"mac_rx_pfc_pri2_pkt_num",
259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
260 {"mac_rx_pfc_pri3_pkt_num",
261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
262 {"mac_rx_pfc_pri4_pkt_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
264 {"mac_rx_pfc_pri5_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
266 {"mac_rx_pfc_pri6_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
268 {"mac_rx_pfc_pri7_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
270 {"mac_tx_total_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
272 {"mac_tx_total_oct_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
274 {"mac_tx_good_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
276 {"mac_tx_bad_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
278 {"mac_tx_good_oct_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
280 {"mac_tx_bad_oct_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
282 {"mac_tx_uni_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
284 {"mac_tx_multi_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
286 {"mac_tx_broad_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
288 {"mac_tx_undersize_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
290 {"mac_tx_overrsize_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
292 {"mac_tx_64_oct_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
294 {"mac_tx_65_127_oct_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
296 {"mac_tx_128_255_oct_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
298 {"mac_tx_256_511_oct_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
300 {"mac_tx_512_1023_oct_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
302 {"mac_tx_1024_1518_oct_pkt_num",
303 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
304 {"mac_tx_1519_max_oct_pkt_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
306 {"mac_rx_total_pkt_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
308 {"mac_rx_total_oct_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
310 {"mac_rx_good_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
312 {"mac_rx_bad_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
314 {"mac_rx_good_oct_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
316 {"mac_rx_bad_oct_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
318 {"mac_rx_uni_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
320 {"mac_rx_multi_pkt_num",
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
322 {"mac_rx_broad_pkt_num",
323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
324 {"mac_rx_undersize_pkt_num",
325 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
326 {"mac_rx_overrsize_pkt_num",
327 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
328 {"mac_rx_64_oct_pkt_num",
329 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
330 {"mac_rx_65_127_oct_pkt_num",
331 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
332 {"mac_rx_128_255_oct_pkt_num",
333 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
334 {"mac_rx_256_511_oct_pkt_num",
335 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
336 {"mac_rx_512_1023_oct_pkt_num",
337 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
338 {"mac_rx_1024_1518_oct_pkt_num",
339 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
340 {"mac_rx_1519_max_oct_pkt_num",
341 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},
342
343 {"mac_trans_fragment_pkt_num",
344 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
345 {"mac_trans_undermin_pkt_num",
346 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
347 {"mac_trans_jabber_pkt_num",
348 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
349 {"mac_trans_err_all_pkt_num",
350 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
351 {"mac_trans_from_app_good_pkt_num",
352 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
353 {"mac_trans_from_app_bad_pkt_num",
354 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
355 {"mac_rcv_fragment_pkt_num",
356 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
357 {"mac_rcv_undermin_pkt_num",
358 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
359 {"mac_rcv_jabber_pkt_num",
360 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
361 {"mac_rcv_fcs_err_pkt_num",
362 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
363 {"mac_rcv_send_app_good_pkt_num",
364 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
365 {"mac_rcv_send_app_bad_pkt_num",
366 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
367};
368
369static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
370{
371#define HCLGE_64_BIT_CMD_NUM 5
372#define HCLGE_64_BIT_RTN_DATANUM 4
373 u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
374 struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
375 u64 *desc_data;
376 int i, k, n;
377 int ret;
378
379 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
380 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
381 if (ret) {
382 dev_err(&hdev->pdev->dev,
383 "Get 64 bit pkt stats fail, status = %d.\n", ret);
384 return ret;
385 }
386
387 for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
388 if (unlikely(i == 0)) {
389 desc_data = (u64 *)(&desc[i].data[0]);
390 n = HCLGE_64_BIT_RTN_DATANUM - 1;
391 } else {
392 desc_data = (u64 *)(&desc[i]);
393 n = HCLGE_64_BIT_RTN_DATANUM;
394 }
395 for (k = 0; k < n; k++) {
396 *data++ += cpu_to_le64(*desc_data);
397 desc_data++;
398 }
399 }
400
401 return 0;
402}
403
404static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
405{
406 stats->pkt_curr_buf_cnt = 0;
407 stats->pkt_curr_buf_tc0_cnt = 0;
408 stats->pkt_curr_buf_tc1_cnt = 0;
409 stats->pkt_curr_buf_tc2_cnt = 0;
410 stats->pkt_curr_buf_tc3_cnt = 0;
411 stats->pkt_curr_buf_tc4_cnt = 0;
412 stats->pkt_curr_buf_tc5_cnt = 0;
413 stats->pkt_curr_buf_tc6_cnt = 0;
414 stats->pkt_curr_buf_tc7_cnt = 0;
415}
416
417static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
418{
419#define HCLGE_32_BIT_CMD_NUM 8
420#define HCLGE_32_BIT_RTN_DATANUM 8
421
422 struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
423 struct hclge_32_bit_stats *all_32_bit_stats;
424 u32 *desc_data;
425 int i, k, n;
426 u64 *data;
427 int ret;
428
429 all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
430 data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);
431
432 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
433 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
434 if (ret) {
435 dev_err(&hdev->pdev->dev,
436 "Get 32 bit pkt stats fail, status = %d.\n", ret);
437
438 return ret;
439 }
440
441 hclge_reset_partial_32bit_counter(all_32_bit_stats);
442 for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
443 if (unlikely(i == 0)) {
444 all_32_bit_stats->igu_rx_err_pkt +=
445 cpu_to_le32(desc[i].data[0]);
446 all_32_bit_stats->igu_rx_no_eof_pkt +=
447 cpu_to_le32(desc[i].data[1] & 0xffff);
448 all_32_bit_stats->igu_rx_no_sof_pkt +=
449 cpu_to_le32((desc[i].data[1] >> 16) & 0xffff);
450
451 desc_data = (u32 *)(&desc[i].data[2]);
452 n = HCLGE_32_BIT_RTN_DATANUM - 4;
453 } else {
454 desc_data = (u32 *)(&desc[i]);
455 n = HCLGE_32_BIT_RTN_DATANUM;
456 }
457 for (k = 0; k < n; k++) {
458 *data++ += cpu_to_le32(*desc_data);
459 desc_data++;
460 }
461 }
462
463 return 0;
464}
465
466static int hclge_mac_update_stats(struct hclge_dev *hdev)
467{
468#define HCLGE_MAC_CMD_NUM 17
469#define HCLGE_RTN_DATA_NUM 4
470
471 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
472 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
473 u64 *desc_data;
474 int i, k, n;
475 int ret;
476
477 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
478 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
479 if (ret) {
480 dev_err(&hdev->pdev->dev,
481 "Get MAC pkt stats fail, status = %d.\n", ret);
482
483 return ret;
484 }
485
486 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
487 if (unlikely(i == 0)) {
488 desc_data = (u64 *)(&desc[i].data[0]);
489 n = HCLGE_RTN_DATA_NUM - 2;
490 } else {
491 desc_data = (u64 *)(&desc[i]);
492 n = HCLGE_RTN_DATA_NUM;
493 }
494 for (k = 0; k < n; k++) {
495 *data++ += cpu_to_le64(*desc_data);
496 desc_data++;
497 }
498 }
499
500 return 0;
501}
502
503static int hclge_tqps_update_stats(struct hnae3_handle *handle)
504{
505 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
506 struct hclge_vport *vport = hclge_get_vport(handle);
507 struct hclge_dev *hdev = vport->back;
508 struct hnae3_queue *queue;
509 struct hclge_desc desc[1];
510 struct hclge_tqp *tqp;
511 int ret, i;
512
513 for (i = 0; i < kinfo->num_tqps; i++) {
514 queue = handle->kinfo.tqp[i];
515 tqp = container_of(queue, struct hclge_tqp, q);
516 /* command : HCLGE_OPC_QUERY_RX_STATUS */
517 hclge_cmd_setup_basic_desc(&desc[0],
518 HCLGE_OPC_QUERY_RX_STATUS,
519 true);
520
521 desc[0].data[0] = (tqp->index & 0x1ff);
522 ret = hclge_cmd_send(&hdev->hw, desc, 1);
523 if (ret) {
524 dev_err(&hdev->pdev->dev,
525 "Query tqp stat fail, status = %d,queue = %d\n",
526 ret, i);
527 return ret;
528 }
529 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
530 cpu_to_le32(desc[0].data[4]);
531 }
532
533 for (i = 0; i < kinfo->num_tqps; i++) {
534 queue = handle->kinfo.tqp[i];
535 tqp = container_of(queue, struct hclge_tqp, q);
536 /* command : HCLGE_OPC_QUERY_TX_STATUS */
537 hclge_cmd_setup_basic_desc(&desc[0],
538 HCLGE_OPC_QUERY_TX_STATUS,
539 true);
540
541 desc[0].data[0] = (tqp->index & 0x1ff);
542 ret = hclge_cmd_send(&hdev->hw, desc, 1);
543 if (ret) {
544 dev_err(&hdev->pdev->dev,
545 "Query tqp stat fail, status = %d,queue = %d\n",
546 ret, i);
547 return ret;
548 }
549 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
550 cpu_to_le32(desc[0].data[4]);
551 }
552
553 return 0;
554}
555
556static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
557{
558 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
559 struct hclge_tqp *tqp;
560 u64 *buff = data;
561 int i;
562
563 for (i = 0; i < kinfo->num_tqps; i++) {
564 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
565 *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_tx_ring_pktnum_rcd);
566 }
567
568 for (i = 0; i < kinfo->num_tqps; i++) {
569 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
570 *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_rx_ring_pktnum_rcd);
571 }
572
573 return buff;
574}
575
576static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
577{
578 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
579
580 return kinfo->num_tqps * 2;
581}
582
583static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
584{
585 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
586 u8 *buff = data;
587 int i = 0;
588
589 for (i = 0; i < kinfo->num_tqps; i++) {
590 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
591 struct hclge_tqp, q);
592 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
593 tqp->index);
594 buff = buff + ETH_GSTRING_LEN;
595 }
596
597 for (i = 0; i < kinfo->num_tqps; i++) {
598 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
599 struct hclge_tqp, q);
600 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
601 tqp->index);
602 buff = buff + ETH_GSTRING_LEN;
603 }
604
605 return buff;
606}
607
608static u64 *hclge_comm_get_stats(void *comm_stats,
609 const struct hclge_comm_stats_str strs[],
610 int size, u64 *data)
611{
612 u64 *buf = data;
613 u32 i;
614
615 for (i = 0; i < size; i++)
616 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
617
618 return buf + size;
619}
620
621static u8 *hclge_comm_get_strings(u32 stringset,
622 const struct hclge_comm_stats_str strs[],
623 int size, u8 *data)
624{
625 char *buff = (char *)data;
626 u32 i;
627
628 if (stringset != ETH_SS_STATS)
629 return buff;
630
631 for (i = 0; i < size; i++) {
632 snprintf(buff, ETH_GSTRING_LEN, "%s",
633 strs[i].desc);
634 buff = buff + ETH_GSTRING_LEN;
635 }
636
637 return (u8 *)buff;
638}
639
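As an aside (editorial illustration, not part of the patch): the stats tables above are plain offsetof()-driven descriptors, and hclge_comm_get_stats()/hclge_comm_get_strings() simply walk them, reading each counter through its recorded byte offset. A standalone sketch of the same pattern, using made-up names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	uint64_t rx_pkts;
	uint64_t tx_pkts;
};

struct demo_stats_str {
	const char *desc;
	unsigned long offset;
};

#define DEMO_FIELD_OFF(f) (offsetof(struct demo_stats, f))
#define DEMO_STATS_READ(p, off) (*(uint64_t *)((uint8_t *)(p) + (off)))

static const struct demo_stats_str demo_strings[] = {
	{"rx_pkts", DEMO_FIELD_OFF(rx_pkts)},
	{"tx_pkts", DEMO_FIELD_OFF(tx_pkts)},
};

int main(void)
{
	struct demo_stats s = { .rx_pkts = 10, .tx_pkts = 20 };
	size_t i;

	/* Same pattern as hclge_comm_get_stats(): walk the table and read
	 * each counter through its byte offset into the stats struct.
	 */
	for (i = 0; i < sizeof(demo_strings) / sizeof(demo_strings[0]); i++)
		printf("%s = %llu\n", demo_strings[i].desc,
		       (unsigned long long)DEMO_STATS_READ(&s,
							   demo_strings[i].offset));
	return 0;
}
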
640static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
641 struct net_device_stats *net_stats)
642{
643 net_stats->tx_dropped = 0;
644 net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
645 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
646 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
647
648 net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
649 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
650 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
651 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
652 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
653 net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
654
655 net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
656 net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
657
658 net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
659 net_stats->rx_length_errors =
660 hw_stats->mac_stats.mac_rx_undersize_pkt_num;
661 net_stats->rx_length_errors +=
662 hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
663 net_stats->rx_over_errors =
664 hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
665}
666
667static void hclge_update_stats_for_all(struct hclge_dev *hdev)
668{
669 struct hnae3_handle *handle;
670 int status;
671
672 handle = &hdev->vport[0].nic;
673 if (handle->client) {
674 status = hclge_tqps_update_stats(handle);
675 if (status) {
676 dev_err(&hdev->pdev->dev,
677 "Update TQPS stats fail, status = %d.\n",
678 status);
679 }
680 }
681
682 status = hclge_mac_update_stats(hdev);
683 if (status)
684 dev_err(&hdev->pdev->dev,
685 "Update MAC stats fail, status = %d.\n", status);
686
687 status = hclge_32_bit_update_stats(hdev);
688 if (status)
689 dev_err(&hdev->pdev->dev,
690 "Update 32 bit stats fail, status = %d.\n",
691 status);
692
693 hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
694}
695
696static void hclge_update_stats(struct hnae3_handle *handle,
697 struct net_device_stats *net_stats)
698{
699 struct hclge_vport *vport = hclge_get_vport(handle);
700 struct hclge_dev *hdev = vport->back;
701 struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
702 int status;
703
704 status = hclge_mac_update_stats(hdev);
705 if (status)
706 dev_err(&hdev->pdev->dev,
707 "Update MAC stats fail, status = %d.\n",
708 status);
709
710 status = hclge_32_bit_update_stats(hdev);
711 if (status)
712 dev_err(&hdev->pdev->dev,
713 "Update 32 bit stats fail, status = %d.\n",
714 status);
715
716 status = hclge_64_bit_update_stats(hdev);
717 if (status)
718 dev_err(&hdev->pdev->dev,
719 "Update 64 bit stats fail, status = %d.\n",
720 status);
721
722 status = hclge_tqps_update_stats(handle);
723 if (status)
724 dev_err(&hdev->pdev->dev,
725 "Update TQPS stats fail, status = %d.\n",
726 status);
727
728 hclge_update_netstat(hw_stats, net_stats);
729}
730
731static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
732{
733#define HCLGE_LOOPBACK_TEST_FLAGS 0x7
734
735 struct hclge_vport *vport = hclge_get_vport(handle);
736 struct hclge_dev *hdev = vport->back;
737 int count = 0;
738
739 /* Loopback test support rules:
740 * mac: only supported in GE mode
741 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
742 * phy: only supported when a PHY device exists on the board
743 */
744 if (stringset == ETH_SS_TEST) {
745 /* clear the loopback bit flags first */
746 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
747 if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
748 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
749 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
750 count += 1;
751 handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
752 } else {
753 count = -EOPNOTSUPP;
754 }
755 } else if (stringset == ETH_SS_STATS) {
756 count = ARRAY_SIZE(g_mac_stats_string) +
757 ARRAY_SIZE(g_all_32bit_stats_string) +
758 ARRAY_SIZE(g_all_64bit_stats_string) +
759 hclge_tqps_get_sset_count(handle, stringset);
760 }
761
762 return count;
763}
764
765static void hclge_get_strings(struct hnae3_handle *handle,
766 u32 stringset,
767 u8 *data)
768{
769 u8 *p = data;
770 int size;
771
772 if (stringset == ETH_SS_STATS) {
773 size = ARRAY_SIZE(g_mac_stats_string);
774 p = hclge_comm_get_strings(stringset,
775 g_mac_stats_string,
776 size,
777 p);
778 size = ARRAY_SIZE(g_all_32bit_stats_string);
779 p = hclge_comm_get_strings(stringset,
780 g_all_32bit_stats_string,
781 size,
782 p);
783 size = ARRAY_SIZE(g_all_64bit_stats_string);
784 p = hclge_comm_get_strings(stringset,
785 g_all_64bit_stats_string,
786 size,
787 p);
788 p = hclge_tqps_get_strings(handle, p);
789 } else if (stringset == ETH_SS_TEST) {
790 if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
791 memcpy(p,
792 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
793 ETH_GSTRING_LEN);
794 p += ETH_GSTRING_LEN;
795 }
796 if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
797 memcpy(p,
798 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
799 ETH_GSTRING_LEN);
800 p += ETH_GSTRING_LEN;
801 }
802 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
803 memcpy(p,
804 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
805 ETH_GSTRING_LEN);
806 p += ETH_GSTRING_LEN;
807 }
808 }
809}
810
811static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
812{
813 struct hclge_vport *vport = hclge_get_vport(handle);
814 struct hclge_dev *hdev = vport->back;
815 u64 *p;
816
817 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
818 g_mac_stats_string,
819 ARRAY_SIZE(g_mac_stats_string),
820 data);
821 p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
822 g_all_32bit_stats_string,
823 ARRAY_SIZE(g_all_32bit_stats_string),
824 p);
825 p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
826 g_all_64bit_stats_string,
827 ARRAY_SIZE(g_all_64bit_stats_string),
828 p);
829 p = hclge_tqps_get_stats(handle, p);
830}
831
832static int hclge_parse_func_status(struct hclge_dev *hdev,
833 struct hclge_func_status *status)
834{
835 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
836 return -EINVAL;
837
838 /* Set the pf to main pf */
839 if (status->pf_state & HCLGE_PF_STATE_MAIN)
840 hdev->flag |= HCLGE_FLAG_MAIN;
841 else
842 hdev->flag &= ~HCLGE_FLAG_MAIN;
843
844 hdev->num_req_vfs = status->vf_num / status->pf_num;
845 return 0;
846}
847
848static int hclge_query_function_status(struct hclge_dev *hdev)
849{
850 struct hclge_func_status *req;
851 struct hclge_desc desc;
852 int timeout = 0;
853 int ret;
854
855 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
856 req = (struct hclge_func_status *)desc.data;
857
858 do {
859 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
860 if (ret) {
861 dev_err(&hdev->pdev->dev,
862 "query function status failed %d.\n",
863 ret);
864
865 return ret;
866 }
867
868 /* Check if PF reset is done */
869 if (req->pf_state)
870 break;
871 usleep_range(1000, 2000);
872 } while (timeout++ < 5);
873
874 ret = hclge_parse_func_status(hdev, req);
875
876 return ret;
877}
878
879static int hclge_query_pf_resource(struct hclge_dev *hdev)
880{
881 struct hclge_pf_res *req;
882 struct hclge_desc desc;
883 int ret;
884
885 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
886 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
887 if (ret) {
888 dev_err(&hdev->pdev->dev,
889 "query pf resource failed %d.\n", ret);
890 return ret;
891 }
892
893 req = (struct hclge_pf_res *)desc.data;
894 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
895 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
896
897 if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) {
898 hdev->num_roce_msix =
899 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
900 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
901
902 /* The PF has both NIC vectors and RoCE vectors;
903 * NIC vectors are queued before RoCE vectors.
904 */
905 hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET;
906 } else {
907 hdev->num_msi =
908 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
909 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
910 }
911
912 return 0;
913}
914
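For reference (editorial sketch, not part of the patch): the _M/_S macro pairs used with hnae_get_field()/hnae_set_field() throughout this file follow the usual mask-and-shift convention. The real helpers are defined in hnae3.h; the standalone example below only illustrates the pattern with hypothetical names.

#include <stdint.h>
#include <stdio.h>

#define DEMO_FIELD_S 4
#define DEMO_FIELD_M (0xf << DEMO_FIELD_S)

static uint32_t demo_get_field(uint32_t origin, uint32_t mask, int shift)
{
	/* Extract a field: mask it out, then shift it down */
	return (origin & mask) >> shift;
}

static void demo_set_field(uint32_t *origin, uint32_t mask, int shift,
			   uint32_t val)
{
	/* Insert a field: clear the old bits, then OR in the shifted value */
	*origin = (*origin & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	demo_set_field(&reg, DEMO_FIELD_M, DEMO_FIELD_S, 0xa);
	printf("reg = 0x%x, field = 0x%x\n", reg,
	       demo_get_field(reg, DEMO_FIELD_M, DEMO_FIELD_S));
	return 0;
}
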
915static int hclge_parse_speed(int speed_cmd, int *speed)
916{
917 switch (speed_cmd) {
918 case 6:
919 *speed = HCLGE_MAC_SPEED_10M;
920 break;
921 case 7:
922 *speed = HCLGE_MAC_SPEED_100M;
923 break;
924 case 0:
925 *speed = HCLGE_MAC_SPEED_1G;
926 break;
927 case 1:
928 *speed = HCLGE_MAC_SPEED_10G;
929 break;
930 case 2:
931 *speed = HCLGE_MAC_SPEED_25G;
932 break;
933 case 3:
934 *speed = HCLGE_MAC_SPEED_40G;
935 break;
936 case 4:
937 *speed = HCLGE_MAC_SPEED_50G;
938 break;
939 case 5:
940 *speed = HCLGE_MAC_SPEED_100G;
941 break;
942 default:
943 return -EINVAL;
944 }
945
946 return 0;
947}
948
949static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
950{
951 struct hclge_cfg_param *req;
952 u64 mac_addr_tmp_high;
953 u64 mac_addr_tmp;
954 int i;
955
956 req = (struct hclge_cfg_param *)desc[0].data;
957
958 /* get the configuration */
959 cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
960 HCLGE_CFG_VMDQ_M,
961 HCLGE_CFG_VMDQ_S);
962 cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
963 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
964 cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
965 HCLGE_CFG_TQP_DESC_N_M,
966 HCLGE_CFG_TQP_DESC_N_S);
967
968 cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
969 HCLGE_CFG_PHY_ADDR_M,
970 HCLGE_CFG_PHY_ADDR_S);
971 cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
972 HCLGE_CFG_MEDIA_TP_M,
973 HCLGE_CFG_MEDIA_TP_S);
974 cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
975 HCLGE_CFG_RX_BUF_LEN_M,
976 HCLGE_CFG_RX_BUF_LEN_S);
977 /* get mac_address */
978 mac_addr_tmp = __le32_to_cpu(req->param[2]);
979 mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
980 HCLGE_CFG_MAC_ADDR_H_M,
981 HCLGE_CFG_MAC_ADDR_H_S);
982
983 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
984
985 cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
986 HCLGE_CFG_DEFAULT_SPEED_M,
987 HCLGE_CFG_DEFAULT_SPEED_S);
988 for (i = 0; i < ETH_ALEN; i++)
989 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
990
991 req = (struct hclge_cfg_param *)desc[1].data;
992 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
993}
994
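A worked example of the MAC address reassembly in hclge_parse_cfg() (editorial, with made-up values): param[2] supplies the low 32 bits, the HCLGE_CFG_MAC_ADDR_H field of param[3] supplies the high 16 bits, and the two shifts by 31 and then 1 amount to a 64-bit shift by 32 written in two steps.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lo32 = 0x33221100;	/* hypothetical param[2] */
	uint64_t hi16 = 0x5544;		/* hypothetical high-16 field of param[3] */
	uint64_t mac = lo32 | ((hi16 << 31) << 1);
	int i;

	/* Bytes come out LSB first, as in the cfg->mac_addr[i] loop:
	 * prints 00:11:22:33:44:55
	 */
	for (i = 0; i < 6; i++)
		printf("%02x%c", (unsigned int)((mac >> (8 * i)) & 0xff),
		       i == 5 ? '\n' : ':');
	return 0;
}
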
995/* hclge_get_cfg: query the static parameters from flash
996 * @hdev: pointer to struct hclge_dev
997 * @hcfg: the config structure to be filled
998 */
999static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1000{
1001 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1002 struct hclge_cfg_param *req;
1003 int i, ret;
1004
1005 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1006 req = (struct hclge_cfg_param *)desc[i].data;
1007 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1008 true);
1009 hnae_set_field(req->offset, HCLGE_CFG_OFFSET_M,
1010 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1011 /* Length must be in units of 4 bytes when sent to hardware */
1012 hnae_set_field(req->offset, HCLGE_CFG_RD_LEN_M,
1013 HCLGE_CFG_RD_LEN_S,
1014 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1015 req->offset = cpu_to_le32(req->offset);
1016 }
1017
1018 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1019 if (ret) {
1020 dev_err(&hdev->pdev->dev,
1021 "get config failed %d.\n", ret);
1022 return ret;
1023 }
1024
1025 hclge_parse_cfg(hcfg, desc);
1026 return 0;
1027}
1028
1029static int hclge_get_cap(struct hclge_dev *hdev)
1030{
1031 int ret;
1032
1033 ret = hclge_query_function_status(hdev);
1034 if (ret) {
1035 dev_err(&hdev->pdev->dev,
1036 "query function status error %d.\n", ret);
1037 return ret;
1038 }
1039
1040 /* get pf resource */
1041 ret = hclge_query_pf_resource(hdev);
1042 if (ret) {
1043 dev_err(&hdev->pdev->dev,
1044 "query pf resource error %d.\n", ret);
1045 return ret;
1046 }
1047
1048 return 0;
1049}
1050
1051static int hclge_configure(struct hclge_dev *hdev)
1052{
1053 struct hclge_cfg cfg;
1054 int ret, i;
1055
1056 ret = hclge_get_cfg(hdev, &cfg);
1057 if (ret) {
1058 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1059 return ret;
1060 }
1061
1062 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1063 hdev->base_tqp_pid = 0;
1064 hdev->rss_size_max = 1;
1065 hdev->rx_buf_len = cfg.rx_buf_len;
1066 for (i = 0; i < ETH_ALEN; i++)
1067 hdev->hw.mac.mac_addr[i] = cfg.mac_addr[i];
1068 hdev->hw.mac.media_type = cfg.media_type;
1069 hdev->num_desc = cfg.tqp_desc_num;
1070 hdev->tm_info.num_pg = 1;
1071 hdev->tm_info.num_tc = cfg.tc_num;
1072 hdev->tm_info.hw_pfc_map = 0;
1073
1074 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1075 if (ret) {
1076 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1077 return ret;
1078 }
1079
1080 if ((hdev->tm_info.num_tc > HNAE3_MAX_TC) ||
1081 (hdev->tm_info.num_tc < 1)) {
1082 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1083 hdev->tm_info.num_tc);
1084 hdev->tm_info.num_tc = 1;
1085 }
1086
1087 /* Non-contiguous TCs are currently not supported */
1088 for (i = 0; i < cfg.tc_num; i++)
1089 hnae_set_bit(hdev->hw_tc_map, i, 1);
1090
1091 if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
1092 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1093 else
1094 hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
1095
1096 return ret;
1097}
1098
1099static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1100 int tso_mss_max)
1101{
1102 struct hclge_cfg_tso_status *req;
1103 struct hclge_desc desc;
1104
1105 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1106
1107 req = (struct hclge_cfg_tso_status *)desc.data;
1108 hnae_set_field(req->tso_mss_min, HCLGE_TSO_MSS_MIN_M,
1109 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1110 hnae_set_field(req->tso_mss_max, HCLGE_TSO_MSS_MIN_M,
1111 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1112
1113 return hclge_cmd_send(&hdev->hw, &desc, 1);
1114}
1115
1116static int hclge_alloc_tqps(struct hclge_dev *hdev)
1117{
1118 struct hclge_tqp *tqp;
1119 int i;
1120
1121 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1122 sizeof(struct hclge_tqp), GFP_KERNEL);
1123 if (!hdev->htqp)
1124 return -ENOMEM;
1125
1126 tqp = hdev->htqp;
1127
1128 for (i = 0; i < hdev->num_tqps; i++) {
1129 tqp->dev = &hdev->pdev->dev;
1130 tqp->index = i;
1131
1132 tqp->q.ae_algo = &ae_algo;
1133 tqp->q.buf_size = hdev->rx_buf_len;
1134 tqp->q.desc_num = hdev->num_desc;
1135 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1136 i * HCLGE_TQP_REG_SIZE;
1137
1138 tqp++;
1139 }
1140
1141 return 0;
1142}
1143
1144static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1145 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1146{
1147 struct hclge_tqp_map *req;
1148 struct hclge_desc desc;
1149 int ret;
1150
1151 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1152
1153 req = (struct hclge_tqp_map *)desc.data;
1154 req->tqp_id = cpu_to_le16(tqp_pid);
1155 req->tqp_vf = cpu_to_le16(func_id);
1156 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1157 1 << HCLGE_TQP_MAP_EN_B;
1158 req->tqp_vid = cpu_to_le16(tqp_vid);
1159
1160 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1161 if (ret) {
1162 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
1163 ret);
1164 return ret;
1165 }
1166
1167 return 0;
1168}
1169
1170static int hclge_assign_tqp(struct hclge_vport *vport,
1171 struct hnae3_queue **tqp, u16 num_tqps)
1172{
1173 struct hclge_dev *hdev = vport->back;
1174 int i, alloced, func_id, ret;
1175 bool is_pf;
1176
1177 func_id = vport->vport_id;
1178 is_pf = (vport->vport_id == 0) ? true : false;
1179
1180 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1181 alloced < num_tqps; i++) {
1182 if (!hdev->htqp[i].alloced) {
1183 hdev->htqp[i].q.handle = &vport->nic;
1184 hdev->htqp[i].q.tqp_index = alloced;
1185 tqp[alloced] = &hdev->htqp[i].q;
1186 hdev->htqp[i].alloced = true;
1187 ret = hclge_map_tqps_to_func(hdev, func_id,
1188 hdev->htqp[i].index,
1189 alloced, is_pf);
1190 if (ret)
1191 return ret;
1192
1193 alloced++;
1194 }
1195 }
1196 vport->alloc_tqps = num_tqps;
1197
1198 return 0;
1199}
1200
1201static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
1202{
1203 struct hnae3_handle *nic = &vport->nic;
1204 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1205 struct hclge_dev *hdev = vport->back;
1206 int i, ret;
1207
1208 kinfo->num_desc = hdev->num_desc;
1209 kinfo->rx_buf_len = hdev->rx_buf_len;
1210 kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
1211 kinfo->rss_size
1212 = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
1213 kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
1214
1215 for (i = 0; i < HNAE3_MAX_TC; i++) {
1216 if (hdev->hw_tc_map & BIT(i)) {
1217 kinfo->tc_info[i].enable = true;
1218 kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
1219 kinfo->tc_info[i].tqp_count = kinfo->rss_size;
1220 kinfo->tc_info[i].tc = i;
1221 } else {
1222 /* Set to the default queue if the TC is disabled */
1223 kinfo->tc_info[i].enable = false;
1224 kinfo->tc_info[i].tqp_offset = 0;
1225 kinfo->tc_info[i].tqp_count = 1;
1226 kinfo->tc_info[i].tc = 0;
1227 }
1228 }
1229
1230 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
1231 sizeof(struct hnae3_queue *), GFP_KERNEL);
1232 if (!kinfo->tqp)
1233 return -ENOMEM;
1234
1235 ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
1236 if (ret) {
1237 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1238 return -EINVAL;
1239 }
1240
1241 return 0;
1242}
1243
1244static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1245{
1246 /* This will be initialized later */
1247}
1248
1249static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1250{
1251 struct hnae3_handle *nic = &vport->nic;
1252 struct hclge_dev *hdev = vport->back;
1253 int ret;
1254
1255 nic->pdev = hdev->pdev;
1256 nic->ae_algo = &ae_algo;
1257 nic->numa_node_mask = hdev->numa_node_mask;
1258
1259 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1260 ret = hclge_knic_setup(vport, num_tqps);
1261 if (ret) {
1262 dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1263 ret);
1264 return ret;
1265 }
1266 } else {
1267 hclge_unic_setup(vport, num_tqps);
1268 }
1269
1270 return 0;
1271}
1272
1273static int hclge_alloc_vport(struct hclge_dev *hdev)
1274{
1275 struct pci_dev *pdev = hdev->pdev;
1276 struct hclge_vport *vport;
1277 u32 tqp_main_vport;
1278 u32 tqp_per_vport;
1279 int num_vport, i;
1280 int ret;
1281
1282 /* We need to alloc a vport for the main NIC of the PF */
1283 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1284
1285 if (hdev->num_tqps < num_vport)
1286 num_vport = hdev->num_tqps;
1287
1288 /* Alloc the same number of TQPs for every vport */
1289 tqp_per_vport = hdev->num_tqps / num_vport;
1290 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1291
1292 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1293 GFP_KERNEL);
1294 if (!vport)
1295 return -ENOMEM;
1296
1297 hdev->vport = vport;
1298 hdev->num_alloc_vport = num_vport;
1299
1300#ifdef CONFIG_PCI_IOV
1301 /* Enable SRIOV */
1302 if (hdev->num_req_vfs) {
1303 dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
1304 hdev->num_req_vfs);
1305 ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
1306 if (ret) {
1307 hdev->num_alloc_vfs = 0;
1308 dev_err(&pdev->dev, "SRIOV enable failed %d\n",
1309 ret);
1310 return ret;
1311 }
1312 }
1313 hdev->num_alloc_vfs = hdev->num_req_vfs;
1314#endif
1315
1316 for (i = 0; i < num_vport; i++) {
1317 vport->back = hdev;
1318 vport->vport_id = i;
1319
1320 if (i == 0)
1321 ret = hclge_vport_setup(vport, tqp_main_vport);
1322 else
1323 ret = hclge_vport_setup(vport, tqp_per_vport);
1324 if (ret) {
1325 dev_err(&pdev->dev,
1326 "vport setup failed for vport %d, %d\n",
1327 i, ret);
1328 return ret;
1329 }
1330
1331 vport++;
1332 }
1333
1334 return 0;
1335}
1336
1337static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, u16 buf_size)
1338{
1339/* TX buffer size is in units of 128 bytes */
1340#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1341#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1342 struct hclge_tx_buff_alloc *req;
1343 struct hclge_desc desc;
1344 int ret;
1345 u8 i;
1346
1347 req = (struct hclge_tx_buff_alloc *)desc.data;
1348
1349 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1350 for (i = 0; i < HCLGE_TC_NUM; i++)
1351 req->tx_pkt_buff[i] =
1352 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1353 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1354
1355 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1356 if (ret) {
1357 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1358 ret);
1359 return ret;
1360 }
1361
1362 return 0;
1363}
1364
1365static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, u32 buf_size)
1366{
1367 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_size);
1368
1369 if (ret) {
1370 dev_err(&hdev->pdev->dev,
1371 "tx buffer alloc failed %d\n", ret);
1372 return ret;
1373 }
1374
1375 return 0;
1376}
1377
1378static int hclge_get_tc_num(struct hclge_dev *hdev)
1379{
1380 int i, cnt = 0;
1381
1382 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1383 if (hdev->hw_tc_map & BIT(i))
1384 cnt++;
1385 return cnt;
1386}
1387
1388static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
1389{
1390 int i, cnt = 0;
1391
1392 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1393 if (hdev->hw_tc_map & BIT(i) &&
1394 hdev->tm_info.hw_pfc_map & BIT(i))
1395 cnt++;
1396 return cnt;
1397}
1398
1399/* Get the number of PFC-enabled TCs that have a private buffer */
1400static int hclge_get_pfc_priv_num(struct hclge_dev *hdev)
1401{
1402 struct hclge_priv_buf *priv;
1403 int i, cnt = 0;
1404
1405 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1406 priv = &hdev->priv_buf[i];
1407 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1408 priv->enable)
1409 cnt++;
1410 }
1411
1412 return cnt;
1413}
1414
1415/* Get the number of PFC-disabled TCs that have a private buffer */
1416static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev)
1417{
1418 struct hclge_priv_buf *priv;
1419 int i, cnt = 0;
1420
1421 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1422 priv = &hdev->priv_buf[i];
1423 if (hdev->hw_tc_map & BIT(i) &&
1424 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1425 priv->enable)
1426 cnt++;
1427 }
1428
1429 return cnt;
1430}
1431
1432static u32 hclge_get_rx_priv_buff_alloced(struct hclge_dev *hdev)
1433{
1434 struct hclge_priv_buf *priv;
1435 u32 rx_priv = 0;
1436 int i;
1437
1438 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1439 priv = &hdev->priv_buf[i];
1440 if (priv->enable)
1441 rx_priv += priv->buf_size;
1442 }
1443 return rx_priv;
1444}
1445
1446static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
1447{
1448 u32 shared_buf_min, shared_buf_tc, shared_std;
1449 int tc_num, pfc_enable_num;
1450 u32 shared_buf;
1451 u32 rx_priv;
1452 int i;
1453
1454 tc_num = hclge_get_tc_num(hdev);
1455 pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
1456
1457 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1458 shared_buf_tc = pfc_enable_num * hdev->mps +
1459 (tc_num - pfc_enable_num) * hdev->mps / 2 +
1460 hdev->mps;
1461 shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
1462
1463 rx_priv = hclge_get_rx_priv_buff_alloced(hdev);
1464 if (rx_all <= rx_priv + shared_std)
1465 return false;
1466
1467 shared_buf = rx_all - rx_priv;
1468 hdev->s_buf.buf_size = shared_buf;
1469 hdev->s_buf.self.high = shared_buf;
1470 hdev->s_buf.self.low = 2 * hdev->mps;
1471
1472 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1473 if ((hdev->hw_tc_map & BIT(i)) &&
1474 (hdev->tm_info.hw_pfc_map & BIT(i))) {
1475 hdev->s_buf.tc_thrd[i].low = hdev->mps;
1476 hdev->s_buf.tc_thrd[i].high = 2 * hdev->mps;
1477 } else {
1478 hdev->s_buf.tc_thrd[i].low = 0;
1479 hdev->s_buf.tc_thrd[i].high = hdev->mps;
1480 }
1481 }
1482
1483 return true;
1484}
1485
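To make the check above concrete (editorial worked example with hypothetical numbers): with an MPS of 1536 bytes, four enabled TCs and PFC on two of them, the shared-buffer requirement is dominated by HCLGE_DEFAULT_DV.

#include <stdio.h>

int main(void)
{
	unsigned int mps = 1536, tc_num = 4, pfc_enable_num = 2;
	unsigned int dv = 0xA000;			/* HCLGE_DEFAULT_DV */
	unsigned int shared_buf_min = 2 * mps + dv;	/* 44032 */
	unsigned int shared_buf_tc = pfc_enable_num * mps +
		(tc_num - pfc_enable_num) * mps / 2 + mps;	/* 6144 */
	unsigned int shared_std = shared_buf_min > shared_buf_tc ?
		shared_buf_min : shared_buf_tc;

	/* rx_all must exceed rx_priv + shared_std for the layout to be OK */
	printf("shared_std = %u\n", shared_std);
	return 0;
}
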
1486/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1487 * @hdev: pointer to struct hclge_dev
1488 * @tx_size: the allocated tx buffer for all TCs
1489 * @return: 0: calculation successful, negative: fail
1490 */
1491int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
1492{
1493 u32 rx_all = hdev->pkt_buf_size - tx_size;
1494 int no_pfc_priv_num, pfc_priv_num;
1495 struct hclge_priv_buf *priv;
1496 int i;
1497
1498 /* step 1, try to alloc private buffer for all enabled tc */
1499 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1500 priv = &hdev->priv_buf[i];
1501 if (hdev->hw_tc_map & BIT(i)) {
1502 priv->enable = 1;
1503 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1504 priv->wl.low = hdev->mps;
1505 priv->wl.high = priv->wl.low + hdev->mps;
1506 priv->buf_size = priv->wl.high +
1507 HCLGE_DEFAULT_DV;
1508 } else {
1509 priv->wl.low = 0;
1510 priv->wl.high = 2 * hdev->mps;
1511 priv->buf_size = priv->wl.high;
1512 }
1513 }
1514 }
1515
1516 if (hclge_is_rx_buf_ok(hdev, rx_all))
1517 return 0;
1518
1519 /* step 2, try to decrease the private buffer size of
1520 * the TCs with PFC disabled
1521 */
1522 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1523 priv = &hdev->priv_buf[i];
1524
1525 if (hdev->hw_tc_map & BIT(i))
1526 priv->enable = 1;
1527
1528 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1529 priv->wl.low = 128;
1530 priv->wl.high = priv->wl.low + hdev->mps;
1531 priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
1532 } else {
1533 priv->wl.low = 0;
1534 priv->wl.high = hdev->mps;
1535 priv->buf_size = priv->wl.high;
1536 }
1537 }
1538
1539 if (hclge_is_rx_buf_ok(hdev, rx_all))
1540 return 0;
1541
1542 /* step 3, try to reduce the number of PFC-disabled TCs
1543 * that have a private buffer
1544 */
1545 /* get the total number of PFC-disabled TCs that have a private buffer */
1546 no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev);
1547
1548 /* clear from the last TC backwards */
1549 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1550 priv = &hdev->priv_buf[i];
1551
1552 if (hdev->hw_tc_map & BIT(i) &&
1553 !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1554 /* Clear the private buffer of this PFC-disabled TC */
1555 priv->wl.low = 0;
1556 priv->wl.high = 0;
1557 priv->buf_size = 0;
1558 priv->enable = 0;
1559 no_pfc_priv_num--;
1560 }
1561
1562 if (hclge_is_rx_buf_ok(hdev, rx_all) ||
1563 no_pfc_priv_num == 0)
1564 break;
1565 }
1566
1567 if (hclge_is_rx_buf_ok(hdev, rx_all))
1568 return 0;
1569
1570 /* step 4, try to reduce the number of PFC-enabled TCs
1571 * that have a private buffer.
1572 */
1573 pfc_priv_num = hclge_get_pfc_priv_num(hdev);
1574
1575 /* clear from the last TC backwards */
1576 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1577 priv = &hdev->priv_buf[i];
1578
1579 if (hdev->hw_tc_map & BIT(i) &&
1580 hdev->tm_info.hw_pfc_map & BIT(i)) {
1581 /* Reduce the number of PFC-enabled TCs with a private buffer */
1582 priv->wl.low = 0;
1583 priv->enable = 0;
1584 priv->wl.high = 0;
1585 priv->buf_size = 0;
1586 pfc_priv_num--;
1587 }
1588
1589 if (hclge_is_rx_buf_ok(hdev, rx_all) ||
1590 pfc_priv_num == 0)
1591 break;
1592 }
1593 if (hclge_is_rx_buf_ok(hdev, rx_all))
1594 return 0;
1595
1596 return -ENOMEM;
1597}
1598
1599static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
1600{
1601 struct hclge_rx_priv_buff *req;
1602 struct hclge_desc desc;
1603 int ret;
1604 int i;
1605
1606 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1607 req = (struct hclge_rx_priv_buff *)desc.data;
1608
1609 /* Alloc private buffer TCs */
1610 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1611 struct hclge_priv_buf *priv = &hdev->priv_buf[i];
1612
1613 req->buf_num[i] =
1614 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1615 req->buf_num[i] |=
1616 cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
1617 }
1618
1619 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1620 if (ret) {
1621 dev_err(&hdev->pdev->dev,
1622 "rx private buffer alloc cmd failed %d\n", ret);
1623 return ret;
1624 }
1625
1626 return 0;
1627}
1628
1629#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
1630
1631static int hclge_rx_priv_wl_config(struct hclge_dev *hdev)
1632{
1633 struct hclge_rx_priv_wl_buf *req;
1634 struct hclge_priv_buf *priv;
1635 struct hclge_desc desc[2];
1636 int i, j;
1637 int ret;
1638
1639 for (i = 0; i < 2; i++) {
1640 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1641 false);
1642 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1643
1644 /* The first descriptor sets the NEXT bit to 1 */
1645 if (i == 0)
1646 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1647 else
1648 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1649
1650 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1651 priv = &hdev->priv_buf[i * HCLGE_TC_NUM_ONE_DESC + j];
1652 req->tc_wl[j].high =
1653 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1654 req->tc_wl[j].high |=
1655 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
1656 HCLGE_RX_PRIV_EN_B);
1657 req->tc_wl[j].low =
1658 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1659 req->tc_wl[j].low |=
1660 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
1661 HCLGE_RX_PRIV_EN_B);
1662 }
1663 }
1664
1665 /* Send 2 descriptors at one time */
1666 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1667 if (ret) {
1668 dev_err(&hdev->pdev->dev,
1669 "rx private waterline config cmd failed %d\n",
1670 ret);
1671 return ret;
1672 }
1673 return 0;
1674}
1675
1676static int hclge_common_thrd_config(struct hclge_dev *hdev)
1677{
1678 struct hclge_shared_buf *s_buf = &hdev->s_buf;
1679 struct hclge_rx_com_thrd *req;
1680 struct hclge_desc desc[2];
1681 struct hclge_tc_thrd *tc;
1682 int i, j;
1683 int ret;
1684
1685 for (i = 0; i < 2; i++) {
1686 hclge_cmd_setup_basic_desc(&desc[i],
1687 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1688 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1689
1690 /* The first descriptor sets the NEXT bit to 1 */
1691 if (i == 0)
1692 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1693 else
1694 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1695
1696 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1697 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1698
1699 req->com_thrd[j].high =
1700 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1701 req->com_thrd[j].high |=
1702 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
1703 HCLGE_RX_PRIV_EN_B);
1704 req->com_thrd[j].low =
1705 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1706 req->com_thrd[j].low |=
1707 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
1708 HCLGE_RX_PRIV_EN_B);
1709 }
1710 }
1711
1712 /* Send 2 descriptors at one time */
1713 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1714 if (ret) {
1715 dev_err(&hdev->pdev->dev,
1716 "common threshold config cmd failed %d\n", ret);
1717 return ret;
1718 }
1719 return 0;
1720}
1721
1722static int hclge_common_wl_config(struct hclge_dev *hdev)
1723{
1724 struct hclge_shared_buf *buf = &hdev->s_buf;
1725 struct hclge_rx_com_wl *req;
1726 struct hclge_desc desc;
1727 int ret;
1728
1729 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1730
1731 req = (struct hclge_rx_com_wl *)desc.data;
1732 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1733 req->com_wl.high |=
1734 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
1735 HCLGE_RX_PRIV_EN_B);
1736
1737 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1738 req->com_wl.low |=
1739 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
1740 HCLGE_RX_PRIV_EN_B);
1741
1742 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1743 if (ret) {
1744 dev_err(&hdev->pdev->dev,
1745 "common waterline config cmd failed %d\n", ret);
1746 return ret;
1747 }
1748
1749 return 0;
1750}
1751
1752int hclge_buffer_alloc(struct hclge_dev *hdev)
1753{
1754 u32 tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1755 int ret;
1756
1757 hdev->priv_buf = devm_kmalloc_array(&hdev->pdev->dev, HCLGE_MAX_TC_NUM,
1758 sizeof(struct hclge_priv_buf),
1759 GFP_KERNEL | __GFP_ZERO);
1760 if (!hdev->priv_buf)
1761 return -ENOMEM;
1762
1763 ret = hclge_tx_buffer_alloc(hdev, tx_buf_size);
1764 if (ret) {
1765 dev_err(&hdev->pdev->dev,
1766 "could not alloc tx buffers %d\n", ret);
1767 return ret;
1768 }
1769
1770 ret = hclge_rx_buffer_calc(hdev, tx_buf_size);
1771 if (ret) {
1772 dev_err(&hdev->pdev->dev,
1773 "could not calc rx priv buffer size for all TCs %d\n",
1774 ret);
1775 return ret;
1776 }
1777
1778 ret = hclge_rx_priv_buf_alloc(hdev);
1779 if (ret) {
1780 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1781 ret);
1782 return ret;
1783 }
1784
1785 ret = hclge_rx_priv_wl_config(hdev);
1786 if (ret) {
1787 dev_err(&hdev->pdev->dev,
1788 "could not configure rx private waterline %d\n", ret);
1789 return ret;
1790 }
1791
1792 ret = hclge_common_thrd_config(hdev);
1793 if (ret) {
1794 dev_err(&hdev->pdev->dev,
1795 "could not configure common threshold %d\n", ret);
1796 return ret;
1797 }
1798
1799 ret = hclge_common_wl_config(hdev);
1800 if (ret) {
1801 dev_err(&hdev->pdev->dev,
1802 "could not configure common waterline %d\n", ret);
1803 return ret;
1804 }
1805
1806 return 0;
1807}
1808
1809static int hclge_init_roce_base_info(struct hclge_vport *vport)
1810{
1811 struct hnae3_handle *roce = &vport->roce;
1812 struct hnae3_handle *nic = &vport->nic;
1813
1814 roce->rinfo.num_vectors = vport->back->num_roce_msix;
1815
1816 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1817 vport->back->num_msi_left == 0)
1818 return -EINVAL;
1819
1820 roce->rinfo.base_vector = vport->back->roce_base_vector;
1821
1822 roce->rinfo.netdev = nic->kinfo.netdev;
1823 roce->rinfo.roce_io_base = vport->back->hw.io_base;
1824
1825 roce->pdev = nic->pdev;
1826 roce->ae_algo = nic->ae_algo;
1827 roce->numa_node_mask = nic->numa_node_mask;
1828
1829 return 0;
1830}
1831
1832static int hclge_init_msix(struct hclge_dev *hdev)
1833{
1834 struct pci_dev *pdev = hdev->pdev;
1835 int ret, i;
1836
1837 hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi,
1838 sizeof(struct msix_entry),
1839 GFP_KERNEL);
1840 if (!hdev->msix_entries)
1841 return -ENOMEM;
1842
1843 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1844 sizeof(u16), GFP_KERNEL);
1845 if (!hdev->vector_status)
1846 return -ENOMEM;
1847
1848 for (i = 0; i < hdev->num_msi; i++) {
1849 hdev->msix_entries[i].entry = i;
1850 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1851 }
1852
1853 hdev->num_msi_left = hdev->num_msi;
1854 hdev->base_msi_vector = hdev->pdev->irq;
1855 hdev->roce_base_vector = hdev->base_msi_vector +
1856 HCLGE_ROCE_VECTOR_OFFSET;
1857
1858 ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries,
1859 hdev->num_msi, hdev->num_msi);
1860 if (ret < 0) {
1861 dev_info(&hdev->pdev->dev,
1862 "MSI-X vector alloc failed: %d\n", ret);
1863 return ret;
1864 }
1865
1866 return 0;
1867}
1868
1869static int hclge_init_msi(struct hclge_dev *hdev)
1870{
1871 struct pci_dev *pdev = hdev->pdev;
1872 int vectors;
1873 int i;
1874
1875 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1876 sizeof(u16), GFP_KERNEL);
1877 if (!hdev->vector_status)
1878 return -ENOMEM;
1879
1880 for (i = 0; i < hdev->num_msi; i++)
1881 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1882
1883 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI);
1884 if (vectors < 0) {
1885 dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors);
1886 return -EINVAL;
1887 }
1888 hdev->num_msi = vectors;
1889 hdev->num_msi_left = vectors;
1890 hdev->base_msi_vector = pdev->irq;
1891 hdev->roce_base_vector = hdev->base_msi_vector +
1892 HCLGE_ROCE_VECTOR_OFFSET;
1893
1894 return 0;
1895}
1896
1897static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
1898{
1899 struct hclge_mac *mac = &hdev->hw.mac;
1900
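	/* only 10M and 100M links may run half duplex; faster speeds are forced to full duplex */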
1901 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
1902 mac->duplex = (u8)duplex;
1903 else
1904 mac->duplex = HCLGE_MAC_FULL;
1905
1906 mac->speed = speed;
1907}
1908
1909int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
1910{
1911 struct hclge_config_mac_speed_dup *req;
1912 struct hclge_desc desc;
1913 int ret;
1914
1915 req = (struct hclge_config_mac_speed_dup *)desc.data;
1916
1917 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1918
1919 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1920
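	/* hardware speed encoding: 6=10M, 7=100M, 0=1G, 1=10G, 2=25G, 3=40G, 4=50G, 5=100G */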
1921 switch (speed) {
1922 case HCLGE_MAC_SPEED_10M:
1923 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1924 HCLGE_CFG_SPEED_S, 6);
1925 break;
1926 case HCLGE_MAC_SPEED_100M:
1927 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1928 HCLGE_CFG_SPEED_S, 7);
1929 break;
1930 case HCLGE_MAC_SPEED_1G:
1931 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1932 HCLGE_CFG_SPEED_S, 0);
1933 break;
1934 case HCLGE_MAC_SPEED_10G:
1935 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1936 HCLGE_CFG_SPEED_S, 1);
1937 break;
1938 case HCLGE_MAC_SPEED_25G:
1939 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1940 HCLGE_CFG_SPEED_S, 2);
1941 break;
1942 case HCLGE_MAC_SPEED_40G:
1943 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1944 HCLGE_CFG_SPEED_S, 3);
1945 break;
1946 case HCLGE_MAC_SPEED_50G:
1947 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1948 HCLGE_CFG_SPEED_S, 4);
1949 break;
1950 case HCLGE_MAC_SPEED_100G:
1951 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1952 HCLGE_CFG_SPEED_S, 5);
1953 break;
1954 default:
1955 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
1956 return -EINVAL;
1957 }
1958
1959 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1960 1);
1961
1962 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1963 if (ret) {
1964 dev_err(&hdev->pdev->dev,
1965 "mac speed/duplex config cmd failed %d.\n", ret);
1966 return ret;
1967 }
1968
1969 hclge_check_speed_dup(hdev, duplex, speed);
1970
1971 return 0;
1972}
1973
1974static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
1975 u8 duplex)
1976{
1977 struct hclge_vport *vport = hclge_get_vport(handle);
1978 struct hclge_dev *hdev = vport->back;
1979
1980 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
1981}
1982
1983static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
1984 u8 *duplex)
1985{
1986 struct hclge_query_an_speed_dup *req;
1987 struct hclge_desc desc;
1988 int speed_tmp;
1989 int ret;
1990
1991 req = (struct hclge_query_an_speed_dup *)desc.data;
1992
1993 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
1994 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1995 if (ret) {
1996 dev_err(&hdev->pdev->dev,
1997 "mac speed/autoneg/duplex query cmd failed %d\n",
1998 ret);
1999 return ret;
2000 }
2001
2002 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
2003 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
2004 HCLGE_QUERY_SPEED_S);
2005
2006 ret = hclge_parse_speed(speed_tmp, speed);
2007 if (ret) {
2008 dev_err(&hdev->pdev->dev,
2009 "could not parse speed(=%d), %d\n", speed_tmp, ret);
2010 return -EIO;
2011 }
2012
2013 return 0;
2014}
2015
2016static int hclge_query_autoneg_result(struct hclge_dev *hdev)
2017{
2018 struct hclge_mac *mac = &hdev->hw.mac;
2019 struct hclge_query_an_speed_dup *req;
2020 struct hclge_desc desc;
2021 int ret;
2022
2023 req = (struct hclge_query_an_speed_dup *)desc.data;
2024
2025 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2026 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2027 if (ret) {
2028 dev_err(&hdev->pdev->dev,
2029 "autoneg result query cmd failed %d.\n", ret);
2030 return ret;
2031 }
2032
2033 mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
2034
2035 return 0;
2036}
2037
2038static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2039{
2040 struct hclge_config_auto_neg *req;
2041 struct hclge_desc desc;
2042 int ret;
2043
2044 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2045
2046 req = (struct hclge_config_auto_neg *)desc.data;
2047 hnae_set_bit(req->cfg_an_cmd_flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2048
2049 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2050 if (ret) {
2051 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2052 ret);
2053 return ret;
2054 }
2055
2056 return 0;
2057}
2058
2059static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2060{
2061 struct hclge_vport *vport = hclge_get_vport(handle);
2062 struct hclge_dev *hdev = vport->back;
2063
2064 return hclge_set_autoneg_en(hdev, enable);
2065}
2066
2067static int hclge_get_autoneg(struct hnae3_handle *handle)
2068{
2069 struct hclge_vport *vport = hclge_get_vport(handle);
2070 struct hclge_dev *hdev = vport->back;
2071
2072 hclge_query_autoneg_result(hdev);
2073
2074 return hdev->hw.mac.autoneg;
2075}
2076
2077static int hclge_mac_init(struct hclge_dev *hdev)
2078{
2079 struct hclge_mac *mac = &hdev->hw.mac;
2080 int ret;
2081
2082 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2083 if (ret) {
2084 dev_err(&hdev->pdev->dev,
2085 "Config mac speed dup fail ret=%d\n", ret);
2086 return ret;
2087 }
2088
2089 mac->link = 0;
2090
2091 ret = hclge_mac_mdio_config(hdev);
2092 if (ret) {
2093 dev_warn(&hdev->pdev->dev,
2094 "mdio config fail ret=%d\n", ret);
2095 return ret;
2096 }
2097
2098 /* Initialize the MTA table work mode */
2099 hdev->accept_mta_mc = true;
2100 hdev->enable_mta = true;
2101 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
2102
2103 ret = hclge_set_mta_filter_mode(hdev,
2104 hdev->mta_mac_sel_type,
2105 hdev->enable_mta);
2106 if (ret) {
2107 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2108 ret);
2109 return ret;
2110 }
2111
2112 return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
2113}
2114
2115static void hclge_task_schedule(struct hclge_dev *hdev)
2116{
2117 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2118 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2119 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2120 (void)schedule_work(&hdev->service_task);
2121}
2122
2123static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2124{
2125 struct hclge_link_status *req;
2126 struct hclge_desc desc;
2127 int link_status;
2128 int ret;
2129
2130 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2131 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2132 if (ret) {
2133 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2134 ret);
2135 return ret;
2136 }
2137
2138 req = (struct hclge_link_status *)desc.data;
2139 link_status = req->status & HCLGE_LINK_STATUS;
2140
2141 return !!link_status;
2142}
2143
2144static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2145{
2146 int mac_state;
2147 int link_stat;
2148
2149 mac_state = hclge_get_mac_link_status(hdev);
2150
2151 if (hdev->hw.mac.phydev) {
2152 if (!genphy_read_status(hdev->hw.mac.phydev))
2153 link_stat = mac_state &
2154 hdev->hw.mac.phydev->link;
2155 else
2156 link_stat = 0;
2157
2158 } else {
2159 link_stat = mac_state;
2160 }
2161
2162 return !!link_stat;
2163}
2164
2165static void hclge_update_link_status(struct hclge_dev *hdev)
2166{
2167 struct hnae3_client *client = hdev->nic_client;
2168 struct hnae3_handle *handle;
2169 int state;
2170 int i;
2171
2172 if (!client)
2173 return;
2174 state = hclge_get_mac_phy_link(hdev);
2175 if (state != hdev->hw.mac.link) {
2176 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2177 handle = &hdev->vport[i].nic;
2178 client->ops->link_status_change(handle, state);
2179 }
2180 hdev->hw.mac.link = state;
2181 }
2182}
2183
2184static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2185{
2186 struct hclge_mac mac = hdev->hw.mac;
2187 u8 duplex;
2188 int speed;
2189 int ret;
2190
2191 /* get the speed and duplex as the autoneg result from the mac cmd
2192 * when the phy doesn't exist.
2193 */
2194 if (mac.phydev)
2195 return 0;
2196
2197 /* update mac->autoneg. */
2198 ret = hclge_query_autoneg_result(hdev);
2199 if (ret) {
2200 dev_err(&hdev->pdev->dev,
2201 "autoneg result query failed %d\n", ret);
2202 return ret;
2203 }
2204
2205 if (!mac.autoneg)
2206 return 0;
2207
2208 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2209 if (ret) {
2210 dev_err(&hdev->pdev->dev,
2211 "mac autoneg/speed/duplex query failed %d\n", ret);
2212 return ret;
2213 }
2214
2215 if ((mac.speed != speed) || (mac.duplex != duplex)) {
2216 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2217 if (ret) {
2218 dev_err(&hdev->pdev->dev,
2219 "mac speed/duplex config failed %d\n", ret);
2220 return ret;
2221 }
2222 }
2223
2224 return 0;
2225}
2226
2227static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2228{
2229 struct hclge_vport *vport = hclge_get_vport(handle);
2230 struct hclge_dev *hdev = vport->back;
2231
2232 return hclge_update_speed_duplex(hdev);
2233}
2234
2235static int hclge_get_status(struct hnae3_handle *handle)
2236{
2237 struct hclge_vport *vport = hclge_get_vport(handle);
2238 struct hclge_dev *hdev = vport->back;
2239
2240 hclge_update_link_status(hdev);
2241
2242 return hdev->hw.mac.link;
2243}
2244
2245static void hclge_service_timer(unsigned long data)
2246{
2247 struct hclge_dev *hdev = (struct hclge_dev *)data;
2248 (void)mod_timer(&hdev->service_timer, jiffies + HZ);
2249
2250 hclge_task_schedule(hdev);
2251}
2252
2253static void hclge_service_complete(struct hclge_dev *hdev)
2254{
2255 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2256
2257 /* Flush memory before next watchdog */
2258 smp_mb__before_atomic();
2259 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2260}
2261
2262static void hclge_service_task(struct work_struct *work)
2263{
2264 struct hclge_dev *hdev =
2265 container_of(work, struct hclge_dev, service_task);
2266
2267 hclge_update_speed_duplex(hdev);
2268 hclge_update_link_status(hdev);
2269 hclge_update_stats_for_all(hdev);
2270 hclge_service_complete(hdev);
2271}
2272
2273static void hclge_disable_sriov(struct hclge_dev *hdev)
2274{
2275#ifdef CONFIG_PCI_IOV
2276 /* If our VFs are assigned we cannot shut down SR-IOV
2277 * without causing issues, so just leave the hardware
2278 * available but disabled
2279 */
2280 if (pci_vfs_assigned(hdev->pdev)) {
2281 dev_warn(&hdev->pdev->dev,
2282 "disabling driver while VFs are assigned\n");
2283 return;
2284 }
2285
2286 pci_disable_sriov(hdev->pdev);
2287#endif
2288}
2289
2290struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2291{
2292 /* VF handle has no client */
2293 if (!handle->client)
2294 return container_of(handle, struct hclge_vport, nic);
2295 else if (handle->client->type == HNAE3_CLIENT_ROCE)
2296 return container_of(handle, struct hclge_vport, roce);
2297 else
2298 return container_of(handle, struct hclge_vport, nic);
2299}
2300
2301static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2302 struct hnae3_vector_info *vector_info)
2303{
2304 struct hclge_vport *vport = hclge_get_vport(handle);
2305 struct hnae3_vector_info *vector = vector_info;
2306 struct hclge_dev *hdev = vport->back;
2307 int alloc = 0;
2308 int i, j;
2309
2310 vector_num = min(hdev->num_msi_left, vector_num);
2311
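	/* scan the MSI entries (skipping entry 0) and hand unused ones to this vport */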
2312 for (j = 0; j < vector_num; j++) {
2313 for (i = 1; i < hdev->num_msi; i++) {
2314 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2315 vector->vector = pci_irq_vector(hdev->pdev, i);
2316 vector->io_addr = hdev->hw.io_base +
2317 HCLGE_VECTOR_REG_BASE +
2318 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
2319 vport->vport_id *
2320 HCLGE_VECTOR_VF_OFFSET;
2321 hdev->vector_status[i] = vport->vport_id;
2322
2323 vector++;
2324 alloc++;
2325
2326 break;
2327 }
2328 }
2329 }
2330 hdev->num_msi_left -= alloc;
2331 hdev->num_msi_used += alloc;
2332
2333 return alloc;
2334}
2335
2336static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2337{
2338 int i;
2339
2340 for (i = 0; i < hdev->num_msi; i++) {
2341 if (hdev->msix_entries) {
2342 if (vector == hdev->msix_entries[i].vector)
2343 return i;
2344 } else {
2345 if (vector == (hdev->base_msi_vector + i))
2346 return i;
2347 }
2348 }
2349 return -EINVAL;
2350}
2351
2352static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
2353{
2354 return HCLGE_RSS_KEY_SIZE;
2355}
2356
2357static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
2358{
2359 return HCLGE_RSS_IND_TBL_SIZE;
2360}
2361
2362static int hclge_get_rss_algo(struct hclge_dev *hdev)
2363{
2364 struct hclge_rss_config *req;
2365 struct hclge_desc desc;
2366 int rss_hash_algo;
2367 int ret;
2368
2369 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);
2370
2371 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2372 if (ret) {
2373 dev_err(&hdev->pdev->dev,
2374 "Get rss algo config error, status =%d\n", ret);
2375 return ret;
2376 }
2377
2378 req = (struct hclge_rss_config *)desc.data;
2379 rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);
2380
2381 if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
2382 return ETH_RSS_HASH_TOP;
2383
2384 return -EINVAL;
2385}
2386
2387static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
2388 const u8 hfunc, const u8 *key)
2389{
2390 struct hclge_rss_config *req;
2391 struct hclge_desc desc;
2392 int key_offset;
2393 int key_size;
2394 int ret;
2395
2396 req = (struct hclge_rss_config *)desc.data;
2397
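	/* the hash key is written HCLGE_RSS_HASH_KEY_NUM bytes per command;
	 * the third command carries whatever remains of HCLGE_RSS_KEY_SIZE
	 */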
2398 for (key_offset = 0; key_offset < 3; key_offset++) {
2399 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
2400 false);
2401
2402 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
2403 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
2404
2405 if (key_offset == 2)
2406 key_size =
2407 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
2408 else
2409 key_size = HCLGE_RSS_HASH_KEY_NUM;
2410
2411 memcpy(req->hash_key,
2412 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
2413
2414 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2415 if (ret) {
2416 dev_err(&hdev->pdev->dev,
2417 "Configure RSS config fail, status = %d\n",
2418 ret);
2419 return ret;
2420 }
2421 }
2422 return 0;
2423}
2424
2425static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
2426{
2427 struct hclge_rss_indirection_table *req;
2428 struct hclge_desc desc;
2429 int i, j;
2430 int ret;
2431
2432 req = (struct hclge_rss_indirection_table *)desc.data;
2433
2434 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
2435 hclge_cmd_setup_basic_desc
2436 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
2437
2438 req->start_table_index = i * HCLGE_RSS_CFG_TBL_SIZE;
2439 req->rss_set_bitmap = HCLGE_RSS_SET_BITMAP_MSK;
2440
2441 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
2442 req->rss_result[j] =
2443 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
2444
2445 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2446 if (ret) {
2447 dev_err(&hdev->pdev->dev,
2448 "Configure rss indir table fail,status = %d\n",
2449 ret);
2450 return ret;
2451 }
2452 }
2453 return 0;
2454}
2455
2456static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
2457 u16 *tc_size, u16 *tc_offset)
2458{
2459 struct hclge_rss_tc_mode *req;
2460 struct hclge_desc desc;
2461 int ret;
2462 int i;
2463
2464 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
2465 req = (struct hclge_rss_tc_mode *)desc.data;
2466
2467 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2468 hnae_set_bit(req->rss_tc_mode[i], HCLGE_RSS_TC_VALID_B,
2469 (tc_valid[i] & 0x1));
2470 hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_SIZE_M,
2471 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
2472 hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_OFFSET_M,
2473 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
2474 }
2475
2476 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2477 if (ret) {
2478 dev_err(&hdev->pdev->dev,
2479 "Configure rss tc mode fail, status = %d\n", ret);
2480 return ret;
2481 }
2482
2483 return 0;
2484}
2485
2486static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
2487{
2488#define HCLGE_RSS_INPUT_TUPLE_OTHER 0xf
2489#define HCLGE_RSS_INPUT_TUPLE_SCTP 0x1f
2490 struct hclge_rss_input_tuple *req;
2491 struct hclge_desc desc;
2492 int ret;
2493
2494 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
2495
2496 req = (struct hclge_rss_input_tuple *)desc.data;
2497 req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2498 req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2499 req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2500 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2501 req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2502 req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2503 req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2504 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2505 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2506 if (ret) {
2507 dev_err(&hdev->pdev->dev,
2508 "Configure rss input fail, status = %d\n", ret);
2509 return ret;
2510 }
2511
2512 return 0;
2513}
2514
2515static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
2516 u8 *key, u8 *hfunc)
2517{
2518 struct hclge_vport *vport = hclge_get_vport(handle);
2519 struct hclge_dev *hdev = vport->back;
2520 int i;
2521
2522 /* Get hash algorithm */
2523 if (hfunc)
2524 *hfunc = hclge_get_rss_algo(hdev);
2525
2526 /* Get the RSS Key required by the user */
2527 if (key)
2528 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
2529
2530 /* Get indirect table */
2531 if (indir)
2532 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2533 indir[i] = vport->rss_indirection_tbl[i];
2534
2535 return 0;
2536}
2537
2538static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
2539 const u8 *key, const u8 hfunc)
2540{
2541 struct hclge_vport *vport = hclge_get_vport(handle);
2542 struct hclge_dev *hdev = vport->back;
2543 u8 hash_algo;
2544 int ret, i;
2545
2546 /* Set the RSS Hash Key if specified by the user */
2547 if (key) {
2548 /* Update the shadow RSS key with the user specified key */
2549 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
2550
2551 if (hfunc == ETH_RSS_HASH_TOP ||
2552 hfunc == ETH_RSS_HASH_NO_CHANGE)
2553 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2554 else
2555 return -EINVAL;
2556 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
2557 if (ret)
2558 return ret;
2559 }
2560
2561 /* Update the shadow RSS table with user specified qids */
2562 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2563 vport->rss_indirection_tbl[i] = indir[i];
2564
2565 /* Update the hardware */
2566 ret = hclge_set_rss_indir_table(hdev, indir);
2567 return ret;
2568}
2569
2570static int hclge_get_tc_size(struct hnae3_handle *handle)
2571{
2572 struct hclge_vport *vport = hclge_get_vport(handle);
2573 struct hclge_dev *hdev = vport->back;
2574
2575 return hdev->rss_size_max;
2576}
2577
2578static int hclge_rss_init_hw(struct hclge_dev *hdev)
2579{
2580 const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2581 struct hclge_vport *vport = hdev->vport;
2582 u16 tc_offset[HCLGE_MAX_TC_NUM];
2583 u8 rss_key[HCLGE_RSS_KEY_SIZE];
2584 u16 tc_valid[HCLGE_MAX_TC_NUM];
2585 u16 tc_size[HCLGE_MAX_TC_NUM];
2586 u32 *rss_indir = NULL;
2587 const u8 *key;
2588 int i, ret, j;
2589
2590 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
2591 if (!rss_indir)
2592 return -ENOMEM;
2593
2594 /* Get default RSS key */
2595 netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);
2596
2597 /* Initialize RSS indirect table for each vport */
2598 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
2599 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
2600 vport[j].rss_indirection_tbl[i] =
2601 i % hdev->rss_size_max;
2602 rss_indir[i] = vport[j].rss_indirection_tbl[i];
2603 }
2604 }
2605 ret = hclge_set_rss_indir_table(hdev, rss_indir);
2606 if (ret)
2607 goto err;
2608
2609 key = rss_key;
2610 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
2611 if (ret)
2612 goto err;
2613
2614 ret = hclge_set_rss_input_tuple(hdev);
2615 if (ret)
2616 goto err;
2617
2618 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2619 if (hdev->hw_tc_map & BIT(i))
2620 tc_valid[i] = 1;
2621 else
2622 tc_valid[i] = 0;
2623
2624 switch (hdev->rss_size_max) {
2625 case HCLGE_RSS_TC_SIZE_0:
2626 tc_size[i] = 0;
2627 break;
2628 case HCLGE_RSS_TC_SIZE_1:
2629 tc_size[i] = 1;
2630 break;
2631 case HCLGE_RSS_TC_SIZE_2:
2632 tc_size[i] = 2;
2633 break;
2634 case HCLGE_RSS_TC_SIZE_3:
2635 tc_size[i] = 3;
2636 break;
2637 case HCLGE_RSS_TC_SIZE_4:
2638 tc_size[i] = 4;
2639 break;
2640 case HCLGE_RSS_TC_SIZE_5:
2641 tc_size[i] = 5;
2642 break;
2643 case HCLGE_RSS_TC_SIZE_6:
2644 tc_size[i] = 6;
2645 break;
2646 case HCLGE_RSS_TC_SIZE_7:
2647 tc_size[i] = 7;
2648 break;
2649 default:
2650 break;
2651 }
2652 tc_offset[i] = hdev->rss_size_max * i;
2653 }
2654 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
2655
2656err:
2657 kfree(rss_indir);
2658
2659 return ret;
2660}
2661
2662int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
2663 struct hnae3_ring_chain_node *ring_chain)
2664{
2665 struct hclge_dev *hdev = vport->back;
2666 struct hclge_ctrl_vector_chain *req;
2667 struct hnae3_ring_chain_node *node;
2668 struct hclge_desc desc;
2669 int ret;
2670 int i;
2671
2672 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
2673
2674 req = (struct hclge_ctrl_vector_chain *)desc.data;
2675 req->int_vector_id = vector_id;
2676
2677 i = 0;
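	/* pack up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries into each
	 * command, flushing and starting a new descriptor when the limit is hit
	 */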
2678 for (node = ring_chain; node; node = node->next) {
2679 hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M,
2680 HCLGE_INT_TYPE_S,
2681 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
2682 hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
2683 HCLGE_TQP_ID_S, node->tqp_index);
2684 req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
2685
2686 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
2687 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
2688
2689 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2690 if (ret) {
2691 dev_err(&hdev->pdev->dev,
2692 "Map TQP fail, status is %d.\n",
2693 ret);
2694 return ret;
2695 }
2696 i = 0;
2697
2698 hclge_cmd_setup_basic_desc(&desc,
2699 HCLGE_OPC_ADD_RING_TO_VECTOR,
2700 false);
2701 req->int_vector_id = vector_id;
2702 }
2703 }
2704
2705 if (i > 0) {
2706 req->int_cause_num = i;
2707
2708 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2709 if (ret) {
2710 dev_err(&hdev->pdev->dev,
2711 "Map TQP fail, status is %d.\n", ret);
2712 return ret;
2713 }
2714 }
2715
2716 return 0;
2717}
2718
2719int hclge_map_handle_ring_to_vector(struct hnae3_handle *handle,
2720 int vector,
2721 struct hnae3_ring_chain_node *ring_chain)
2722{
2723 struct hclge_vport *vport = hclge_get_vport(handle);
2724 struct hclge_dev *hdev = vport->back;
2725 int vector_id;
2726
2727 vector_id = hclge_get_vector_index(hdev, vector);
2728 if (vector_id < 0) {
2729 dev_err(&hdev->pdev->dev,
2730 "Get vector index fail. ret =%d\n", vector_id);
2731 return vector_id;
2732 }
2733
2734 return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
2735}
2736
2737static int hclge_unmap_ring_from_vector(
2738 struct hnae3_handle *handle, int vector,
2739 struct hnae3_ring_chain_node *ring_chain)
2740{
2741 struct hclge_vport *vport = hclge_get_vport(handle);
2742 struct hclge_dev *hdev = vport->back;
2743 struct hclge_ctrl_vector_chain *req;
2744 struct hnae3_ring_chain_node *node;
2745 struct hclge_desc desc;
2746 int i, vector_id;
2747 int ret;
2748
2749 vector_id = hclge_get_vector_index(hdev, vector);
2750 if (vector_id < 0) {
2751 dev_err(&handle->pdev->dev,
2752 "Get vector index fail. ret =%d\n", vector_id);
2753 return vector_id;
2754 }
2755
2756 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
2757
2758 req = (struct hclge_ctrl_vector_chain *)desc.data;
2759 req->int_vector_id = vector_id;
2760
2761 i = 0;
2762 for (node = ring_chain; node; node = node->next) {
2763 hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M,
2764 HCLGE_INT_TYPE_S,
2765 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
2766 hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
2767 HCLGE_TQP_ID_S, node->tqp_index);
2768
2769 req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
2770
2771 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
2772 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
2773
2774 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2775 if (ret) {
2776 dev_err(&hdev->pdev->dev,
2777 "Unmap TQP fail, status is %d.\n",
2778 ret);
2779 return ret;
2780 }
2781 i = 0;
2782 hclge_cmd_setup_basic_desc(&desc,
2783 HCLGE_OPC_DEL_RING_TO_VECTOR,
2784 false);
2785 req->int_vector_id = vector_id;
2786 }
2787 }
2788
2789 if (i > 0) {
2790 req->int_cause_num = i;
2791
2792 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2793 if (ret) {
2794 dev_err(&hdev->pdev->dev,
2795 "Unmap TQP fail, status is %d.\n", ret);
2796 return ret;
2797 }
2798 }
2799
2800 return 0;
2801}
2802
2803int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
2804 struct hclge_promisc_param *param)
2805{
2806 struct hclge_promisc_cfg *req;
2807 struct hclge_desc desc;
2808 int ret;
2809
2810 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
2811
2812 req = (struct hclge_promisc_cfg *)desc.data;
2813 req->vf_id = param->vf_id;
2814 req->flag = (param->enable << HCLGE_PROMISC_EN_B);
2815
2816 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2817 if (ret) {
2818 dev_err(&hdev->pdev->dev,
2819 "Set promisc mode fail, status is %d.\n", ret);
2820 return ret;
2821 }
2822 return 0;
2823}
2824
2825void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
2826 bool en_mc, bool en_bc, int vport_id)
2827{
2828 if (!param)
2829 return;
2830
2831 memset(param, 0, sizeof(struct hclge_promisc_param));
2832 if (en_uc)
2833 param->enable = HCLGE_PROMISC_EN_UC;
2834 if (en_mc)
2835 param->enable |= HCLGE_PROMISC_EN_MC;
2836 if (en_bc)
2837 param->enable |= HCLGE_PROMISC_EN_BC;
2838 param->vf_id = vport_id;
2839}
2840
2841static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
2842{
2843 struct hclge_vport *vport = hclge_get_vport(handle);
2844 struct hclge_dev *hdev = vport->back;
2845 struct hclge_promisc_param param;
2846
2847 hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
2848 hclge_cmd_set_promisc_mode(hdev, &param);
2849}
2850
2851static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
2852{
2853 struct hclge_desc desc;
2854 struct hclge_config_mac_mode *req =
2855 (struct hclge_config_mac_mode *)desc.data;
2856 int ret;
2857
2858 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
2859 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_TX_EN_B, enable);
2860 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_EN_B, enable);
2861 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_TX_B, enable);
2862 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_RX_B, enable);
2863 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_TX_B, 0);
2864 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_RX_B, 0);
2865 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_APP_LP_B, 0);
2866 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_LINE_LP_B, 0);
2867 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_FCS_TX_B, enable);
2868 hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_FCS_B, enable);
2869 hnae_set_bit(req->txrx_pad_fcs_loop_en,
2870 HCLGE_MAC_RX_FCS_STRIP_B, enable);
2871 hnae_set_bit(req->txrx_pad_fcs_loop_en,
2872 HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
2873 hnae_set_bit(req->txrx_pad_fcs_loop_en,
2874 HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
2875 hnae_set_bit(req->txrx_pad_fcs_loop_en,
2876 HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
2877
2878 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2879 if (ret)
2880 dev_err(&hdev->pdev->dev,
2881 "mac enable fail, ret =%d.\n", ret);
2882}
2883
2884static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
2885 int stream_id, bool enable)
2886{
2887 struct hclge_desc desc;
2888 struct hclge_cfg_com_tqp_queue *req =
2889 (struct hclge_cfg_com_tqp_queue *)desc.data;
2890 int ret;
2891
2892 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
2893 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
2894 req->stream_id = cpu_to_le16(stream_id);
2895 req->enable |= enable << HCLGE_TQP_ENABLE_B;
2896
2897 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2898 if (ret)
2899 dev_err(&hdev->pdev->dev,
2900 "Tqp enable fail, status =%d.\n", ret);
2901 return ret;
2902}
2903
2904static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
2905{
2906 struct hclge_vport *vport = hclge_get_vport(handle);
2907 struct hnae3_queue *queue;
2908 struct hclge_tqp *tqp;
2909 int i;
2910
2911 for (i = 0; i < vport->alloc_tqps; i++) {
2912 queue = handle->kinfo.tqp[i];
2913 tqp = container_of(queue, struct hclge_tqp, q);
2914 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
2915 }
2916}
2917
2918static int hclge_ae_start(struct hnae3_handle *handle)
2919{
2920 struct hclge_vport *vport = hclge_get_vport(handle);
2921 struct hclge_dev *hdev = vport->back;
2922 int i, queue_id, ret;
2923
2924 for (i = 0; i < vport->alloc_tqps; i++) {
2925 /* todo clear interrupt */
2926 /* ring enable */
2927 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
2928 if (queue_id < 0) {
2929 dev_warn(&hdev->pdev->dev,
2930 "Get invalid queue id, ignore it\n");
2931 continue;
2932 }
2933
2934 hclge_tqp_enable(hdev, queue_id, 0, true);
2935 }
2936 /* mac enable */
2937 hclge_cfg_mac_mode(hdev, true);
2938 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
2939 (void)mod_timer(&hdev->service_timer, jiffies + HZ);
2940
2941 ret = hclge_mac_start_phy(hdev);
2942 if (ret)
2943 return ret;
2944
2945 /* reset tqp stats */
2946 hclge_reset_tqp_stats(handle);
2947
2948 return 0;
2949}
2950
2951static void hclge_ae_stop(struct hnae3_handle *handle)
2952{
2953 struct hclge_vport *vport = hclge_get_vport(handle);
2954 struct hclge_dev *hdev = vport->back;
2955 int i, queue_id;
2956
2957 for (i = 0; i < vport->alloc_tqps; i++) {
2958 /* Ring disable */
2959 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
2960 if (queue_id < 0) {
2961 dev_warn(&hdev->pdev->dev,
2962 "Get invalid queue id, ignore it\n");
2963 continue;
2964 }
2965
2966 hclge_tqp_enable(hdev, queue_id, 0, false);
2967 }
2968 /* Mac disable */
2969 hclge_cfg_mac_mode(hdev, false);
2970
2971 hclge_mac_stop_phy(hdev);
2972
2973 /* reset tqp stats */
2974 hclge_reset_tqp_stats(handle);
2975}
2976
2977static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
2978 u16 cmdq_resp, u8 resp_code,
2979 enum hclge_mac_vlan_tbl_opcode op)
2980{
2981 struct hclge_dev *hdev = vport->back;
2982 int return_status = -EIO;
2983
2984 if (cmdq_resp) {
2985 dev_err(&hdev->pdev->dev,
2986 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
2987 cmdq_resp);
2988 return -EIO;
2989 }
2990
2991 if (op == HCLGE_MAC_VLAN_ADD) {
2992 if ((!resp_code) || (resp_code == 1)) {
2993 return_status = 0;
2994 } else if (resp_code == 2) {
2995 return_status = -EIO;
2996 dev_err(&hdev->pdev->dev,
2997 "add mac addr failed for uc_overflow.\n");
2998 } else if (resp_code == 3) {
2999 return_status = -EIO;
3000 dev_err(&hdev->pdev->dev,
3001 "add mac addr failed for mc_overflow.\n");
3002 } else {
3003 dev_err(&hdev->pdev->dev,
3004 "add mac addr failed for undefined, code=%d.\n",
3005 resp_code);
3006 }
3007 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
3008 if (!resp_code) {
3009 return_status = 0;
3010 } else if (resp_code == 1) {
3011 return_status = -EIO;
3012 dev_dbg(&hdev->pdev->dev,
3013 "remove mac addr failed for miss.\n");
3014 } else {
3015 dev_err(&hdev->pdev->dev,
3016 "remove mac addr failed for undefined, code=%d.\n",
3017 resp_code);
3018 }
3019 } else if (op == HCLGE_MAC_VLAN_LKUP) {
3020 if (!resp_code) {
3021 return_status = 0;
3022 } else if (resp_code == 1) {
3023 return_status = -EIO;
3024 dev_dbg(&hdev->pdev->dev,
3025 "lookup mac addr failed for miss.\n");
3026 } else {
3027 dev_err(&hdev->pdev->dev,
3028 "lookup mac addr failed for undefined, code=%d.\n",
3029 resp_code);
3030 }
3031 } else {
3032 return_status = -EIO;
3033 dev_err(&hdev->pdev->dev,
3034 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
3035 op);
3036 }
3037
3038 return return_status;
3039}
3040
3041static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
3042{
3043 int word_num;
3044 int bit_num;
3045
3046 if (vfid > 255 || vfid < 0)
3047 return -EIO;
3048
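	/* the function bitmap spans two descriptors: vfid 0-191 in desc[1],
	 * vfid 192-255 in desc[2], 32 function bits per data word
	 */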
3049 if (vfid >= 0 && vfid <= 191) {
3050 word_num = vfid / 32;
3051 bit_num = vfid % 32;
3052 if (clr)
3053 desc[1].data[word_num] &= ~(1 << bit_num);
3054 else
3055 desc[1].data[word_num] |= (1 << bit_num);
3056 } else {
3057 word_num = (vfid - 192) / 32;
3058 bit_num = vfid % 32;
3059 if (clr)
3060 desc[2].data[word_num] &= ~(1 << bit_num);
3061 else
3062 desc[2].data[word_num] |= (1 << bit_num);
3063 }
3064
3065 return 0;
3066}
3067
3068static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
3069{
3070#define HCLGE_DESC_NUMBER 3
3071#define HCLGE_FUNC_NUMBER_PER_DESC 6
3072 int i, j;
3073
3074 for (i = 0; i < HCLGE_DESC_NUMBER; i++)
3075 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
3076 if (desc[i].data[j])
3077 return false;
3078
3079 return true;
3080}
3081
3082static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry *new_req,
3083 const u8 *addr)
3084{
3085 const unsigned char *mac_addr = addr;
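	/* bytes 0-3 of the MAC form mac_addr_hi32, bytes 4-5 form mac_addr_lo16 */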
3086 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
3087 (mac_addr[0]) | (mac_addr[1] << 8);
3088 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
3089
3090 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
3091 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
3092}
3093
3094u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
3095 const u8 *addr)
3096{
3097 u16 high_val = addr[1] | (addr[0] << 8);
3098 struct hclge_dev *hdev = vport->back;
3099 u32 rsh = 4 - hdev->mta_mac_sel_type;
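	/* the MTA index is a 12-bit slice of the top two MAC bytes; mta_mac_sel_type selects the shift */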
3100 u16 ret_val = (high_val >> rsh) & 0xfff;
3101
3102 return ret_val;
3103}
3104
3105static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
3106 enum hclge_mta_dmac_sel_type mta_mac_sel,
3107 bool enable)
3108{
3109 struct hclge_mta_filter_mode *req;
3110 struct hclge_desc desc;
3111 int ret;
3112
3113 req = (struct hclge_mta_filter_mode *)desc.data;
3114 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
3115
3116 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
3117 enable);
3118 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
3119 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
3120
3121 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3122 if (ret) {
3123 dev_err(&hdev->pdev->dev,
3124 "Config mta filter mode failed for cmd_send, ret =%d.\n",
3125 ret);
3126 return ret;
3127 }
3128
3129 return 0;
3130}
3131
3132int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
3133 u8 func_id,
3134 bool enable)
3135{
3136 struct hclge_cfg_func_mta_filter *req;
3137 struct hclge_desc desc;
3138 int ret;
3139
3140 req = (struct hclge_cfg_func_mta_filter *)desc.data;
3141 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
3142
3143 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
3144 enable);
3145 req->function_id = func_id;
3146
3147 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3148 if (ret) {
3149 dev_err(&hdev->pdev->dev,
3150 "Config func_id enable failed for cmd_send, ret =%d.\n",
3151 ret);
3152 return ret;
3153 }
3154
3155 return 0;
3156}
3157
3158static int hclge_set_mta_table_item(struct hclge_vport *vport,
3159 u16 idx,
3160 bool enable)
3161{
3162 struct hclge_dev *hdev = vport->back;
3163 struct hclge_cfg_func_mta_item *req;
3164 struct hclge_desc desc;
3165 int ret;
3166
3167 req = (struct hclge_cfg_func_mta_item *)desc.data;
3168 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
3169 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
3170
3171 hnae_set_field(req->item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
3172 HCLGE_CFG_MTA_ITEM_IDX_S, idx);
3173 req->item_idx = cpu_to_le16(req->item_idx);
3174
3175 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3176 if (ret) {
3177 dev_err(&hdev->pdev->dev,
3178 "Config mta table item failed for cmd_send, ret =%d.\n",
3179 ret);
3180 return ret;
3181 }
3182
3183 return 0;
3184}
3185
3186static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
3187 struct hclge_mac_vlan_tbl_entry *req)
3188{
3189 struct hclge_dev *hdev = vport->back;
3190 struct hclge_desc desc;
3191 u8 resp_code;
3192 int ret;
3193
3194 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
3195
3196 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry));
3197
3198 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3199 if (ret) {
3200 dev_err(&hdev->pdev->dev,
3201 "del mac addr failed for cmd_send, ret =%d.\n",
3202 ret);
3203 return ret;
3204 }
3205 resp_code = (desc.data[0] >> 8) & 0xff;
3206
3207 return hclge_get_mac_vlan_cmd_status(vport, desc.retval, resp_code,
3208 HCLGE_MAC_VLAN_REMOVE);
3209}
3210
3211static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
3212 struct hclge_mac_vlan_tbl_entry *req,
3213 struct hclge_desc *desc,
3214 bool is_mc)
3215{
3216 struct hclge_dev *hdev = vport->back;
3217 u8 resp_code;
3218 int ret;
3219
3220 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
3221 if (is_mc) {
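		/* a multicast entry spans three chained descriptors; the extra
		 * descriptors carry the VF bitmap
		 */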
3222 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3223 memcpy(desc[0].data,
3224 req,
3225 sizeof(struct hclge_mac_vlan_tbl_entry));
3226 hclge_cmd_setup_basic_desc(&desc[1],
3227 HCLGE_OPC_MAC_VLAN_ADD,
3228 true);
3229 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3230 hclge_cmd_setup_basic_desc(&desc[2],
3231 HCLGE_OPC_MAC_VLAN_ADD,
3232 true);
3233 ret = hclge_cmd_send(&hdev->hw, desc, 3);
3234 } else {
3235 memcpy(desc[0].data,
3236 req,
3237 sizeof(struct hclge_mac_vlan_tbl_entry));
3238 ret = hclge_cmd_send(&hdev->hw, desc, 1);
3239 }
3240 if (ret) {
3241 dev_err(&hdev->pdev->dev,
3242 "lookup mac addr failed for cmd_send, ret =%d.\n",
3243 ret);
3244 return ret;
3245 }
3246 resp_code = (desc[0].data[0] >> 8) & 0xff;
3247
3248 return hclge_get_mac_vlan_cmd_status(vport, desc[0].retval, resp_code,
3249 HCLGE_MAC_VLAN_LKUP);
3250}
3251
3252static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
3253 struct hclge_mac_vlan_tbl_entry *req,
3254 struct hclge_desc *mc_desc)
3255{
3256 struct hclge_dev *hdev = vport->back;
3257 int cfg_status;
3258 u8 resp_code;
3259 int ret;
3260
3261 if (!mc_desc) {
3262 struct hclge_desc desc;
3263
3264 hclge_cmd_setup_basic_desc(&desc,
3265 HCLGE_OPC_MAC_VLAN_ADD,
3266 false);
3267 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry));
3268 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3269 resp_code = (desc.data[0] >> 8) & 0xff;
3270 cfg_status = hclge_get_mac_vlan_cmd_status(vport, desc.retval,
3271 resp_code,
3272 HCLGE_MAC_VLAN_ADD);
3273 } else {
3274 mc_desc[0].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3275 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3276 mc_desc[1].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3277 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3278 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
3279 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
3280 memcpy(mc_desc[0].data, req,
3281 sizeof(struct hclge_mac_vlan_tbl_entry));
3282 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
3283 resp_code = (mc_desc[0].data[0] >> 8) & 0xff;
3284 cfg_status = hclge_get_mac_vlan_cmd_status(vport,
3285 mc_desc[0].retval,
3286 resp_code,
3287 HCLGE_MAC_VLAN_ADD);
3288 }
3289
3290 if (ret) {
3291 dev_err(&hdev->pdev->dev,
3292 "add mac addr failed for cmd_send, ret =%d.\n",
3293 ret);
3294 return ret;
3295 }
3296
3297 return cfg_status;
3298}
3299
3300static int hclge_add_uc_addr(struct hnae3_handle *handle,
3301 const unsigned char *addr)
3302{
3303 struct hclge_vport *vport = hclge_get_vport(handle);
3304
3305 return hclge_add_uc_addr_common(vport, addr);
3306}
3307
3308int hclge_add_uc_addr_common(struct hclge_vport *vport,
3309 const unsigned char *addr)
3310{
3311 struct hclge_dev *hdev = vport->back;
3312 struct hclge_mac_vlan_tbl_entry req;
3313 enum hclge_cmd_status status;
3314
3315 /* mac addr check */
3316 if (is_zero_ether_addr(addr) ||
3317 is_broadcast_ether_addr(addr) ||
3318 is_multicast_ether_addr(addr)) {
3319 dev_err(&hdev->pdev->dev,
3320 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
3321 addr,
3322 is_zero_ether_addr(addr),
3323 is_broadcast_ether_addr(addr),
3324 is_multicast_ether_addr(addr));
3325 return -EINVAL;
3326 }
3327
3328 memset(&req, 0, sizeof(req));
3329 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3330 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3331 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
3332 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3333 hnae_set_bit(req.egress_port,
3334 HCLGE_MAC_EPORT_SW_EN_B, 0);
3335 hnae_set_bit(req.egress_port,
3336 HCLGE_MAC_EPORT_TYPE_B, 0);
3337 hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_VFID_M,
3338 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
3339 hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_PFID_M,
3340 HCLGE_MAC_EPORT_PFID_S, 0);
3341 req.egress_port = cpu_to_le16(req.egress_port);
3342
3343 hclge_prepare_mac_addr(&req, addr);
3344
3345 status = hclge_add_mac_vlan_tbl(vport, &req, NULL);
3346
3347 return status;
3348}
3349
3350static int hclge_rm_uc_addr(struct hnae3_handle *handle,
3351 const unsigned char *addr)
3352{
3353 struct hclge_vport *vport = hclge_get_vport(handle);
3354
3355 return hclge_rm_uc_addr_common(vport, addr);
3356}
3357
3358int hclge_rm_uc_addr_common(struct hclge_vport *vport,
3359 const unsigned char *addr)
3360{
3361 struct hclge_dev *hdev = vport->back;
3362 struct hclge_mac_vlan_tbl_entry req;
3363 enum hclge_cmd_status status;
3364
3365 /* mac addr check */
3366 if (is_zero_ether_addr(addr) ||
3367 is_broadcast_ether_addr(addr) ||
3368 is_multicast_ether_addr(addr)) {
3369 dev_dbg(&hdev->pdev->dev,
3370 "Remove mac err! invalid mac:%pM.\n",
3371 addr);
3372 return -EINVAL;
3373 }
3374
3375 memset(&req, 0, sizeof(req));
3376 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3377 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3378 hclge_prepare_mac_addr(&req, addr);
3379 status = hclge_remove_mac_vlan_tbl(vport, &req);
3380
3381 return status;
3382}
3383
3384static int hclge_add_mc_addr(struct hnae3_handle *handle,
3385 const unsigned char *addr)
3386{
3387 struct hclge_vport *vport = hclge_get_vport(handle);
3388
3389 return hclge_add_mc_addr_common(vport, addr);
3390}
3391
3392int hclge_add_mc_addr_common(struct hclge_vport *vport,
3393 const unsigned char *addr)
3394{
3395 struct hclge_dev *hdev = vport->back;
3396 struct hclge_mac_vlan_tbl_entry req;
3397 struct hclge_desc desc[3];
3398 u16 tbl_idx;
3399 int status;
3400
3401 /* mac addr check */
3402 if (!is_multicast_ether_addr(addr)) {
3403 dev_err(&hdev->pdev->dev,
3404 "Add mc mac err! invalid mac:%pM.\n",
3405 addr);
3406 return -EINVAL;
3407 }
3408 memset(&req, 0, sizeof(req));
3409 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3410 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3411 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
3412 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3413 hclge_prepare_mac_addr(&req, addr);
3414 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
3415 if (!status) {
3416 /* This mac addr exists, update VFID for it */
3417 hclge_update_desc_vfid(desc, vport->vport_id, false);
3418 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3419 } else {
3420 /* This mac addr does not exist, add a new entry for it */
3421 memset(desc[0].data, 0, sizeof(desc[0].data));
3422 memset(desc[1].data, 0, sizeof(desc[1].data));
3423 memset(desc[2].data, 0, sizeof(desc[2].data));
3424 hclge_update_desc_vfid(desc, vport->vport_id, false);
3425 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3426 }
3427
3428 /* Set MTA table for this MAC address */
3429 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
3430 status = hclge_set_mta_table_item(vport, tbl_idx, true);
3431
3432 return status;
3433}
3434
3435static int hclge_rm_mc_addr(struct hnae3_handle *handle,
3436 const unsigned char *addr)
3437{
3438 struct hclge_vport *vport = hclge_get_vport(handle);
3439
3440 return hclge_rm_mc_addr_common(vport, addr);
3441}
3442
3443int hclge_rm_mc_addr_common(struct hclge_vport *vport,
3444 const unsigned char *addr)
3445{
3446 struct hclge_dev *hdev = vport->back;
3447 struct hclge_mac_vlan_tbl_entry req;
3448 enum hclge_cmd_status status;
3449 struct hclge_desc desc[3];
3450 u16 tbl_idx;
3451
3452 /* mac addr check */
3453 if (!is_multicast_ether_addr(addr)) {
3454 dev_dbg(&hdev->pdev->dev,
3455 "Remove mc mac err! invalid mac:%pM.\n",
3456 addr);
3457 return -EINVAL;
3458 }
3459
3460 memset(&req, 0, sizeof(req));
3461 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3462 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3463 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
3464 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3465 hclge_prepare_mac_addr(&req, addr);
3466 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
3467 if (!status) {
3468 /* This mac addr exists, remove this handle's VFID for it */
3469 hclge_update_desc_vfid(desc, vport->vport_id, true);
3470
3471 if (hclge_is_all_function_id_zero(desc))
3472 /* All the vfids are zero, so delete this entry */
3473 status = hclge_remove_mac_vlan_tbl(vport, &req);
3474 else
3475 /* Not all the vfids are zero, just update the vfids */
3476 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
3477
3478 } else {
3479 /* This mac addr does not exist, can't delete it */
3480 dev_err(&hdev->pdev->dev,
3481 "Rm multicast mac addr failed, ret = %d.\n",
3482 status);
3483 return -EIO;
3484 }
3485
3486 /* Set MTA table for this MAC address */
3487 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
3488 status = hclge_set_mta_table_item(vport, tbl_idx, false);
3489
3490 return status;
3491}
3492
3493static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
3494{
3495 struct hclge_vport *vport = hclge_get_vport(handle);
3496 struct hclge_dev *hdev = vport->back;
3497
3498 ether_addr_copy(p, hdev->hw.mac.mac_addr);
3499}
3500
3501static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
3502{
3503 const unsigned char *new_addr = (const unsigned char *)p;
3504 struct hclge_vport *vport = hclge_get_vport(handle);
3505 struct hclge_dev *hdev = vport->back;
3506
3507 /* mac addr check */
3508 if (is_zero_ether_addr(new_addr) ||
3509 is_broadcast_ether_addr(new_addr) ||
3510 is_multicast_ether_addr(new_addr)) {
3511 dev_err(&hdev->pdev->dev,
3512 "Change uc mac err! invalid mac:%pM.\n",
3513 new_addr);
3514 return -EINVAL;
3515 }
3516
3517 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
3518
3519 if (!hclge_add_uc_addr(handle, new_addr)) {
3520 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
3521 return 0;
3522 }
3523
3524 return -EIO;
3525}
3526
3527static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
3528 bool filter_en)
3529{
3530 struct hclge_vlan_filter_ctrl *req;
3531 struct hclge_desc desc;
3532 int ret;
3533
3534 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
3535
3536 req = (struct hclge_vlan_filter_ctrl *)desc.data;
3537 req->vlan_type = vlan_type;
3538 req->vlan_fe = filter_en;
3539
3540 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3541 if (ret) {
3542 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
3543 ret);
3544 return ret;
3545 }
3546
3547 return 0;
3548}
3549
3550int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
3551 bool is_kill, u16 vlan, u8 qos, __be16 proto)
3552{
3553#define HCLGE_MAX_VF_BYTES 16
3554 struct hclge_vlan_filter_vf_cfg *req0;
3555 struct hclge_vlan_filter_vf_cfg *req1;
3556 struct hclge_desc desc[2];
3557 u8 vf_byte_val;
3558 u8 vf_byte_off;
3559 int ret;
3560
3561 hclge_cmd_setup_basic_desc(&desc[0],
3562 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
3563 hclge_cmd_setup_basic_desc(&desc[1],
3564 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
3565
3566 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3567
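	/* locate this VF's bit; bytes beyond HCLGE_MAX_VF_BYTES spill into the second descriptor's bitmap */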
3568 vf_byte_off = vfid / 8;
3569 vf_byte_val = 1 << (vfid % 8);
3570
3571 req0 = (struct hclge_vlan_filter_vf_cfg *)desc[0].data;
3572 req1 = (struct hclge_vlan_filter_vf_cfg *)desc[1].data;
3573
3574 req0->vlan_id = vlan;
3575 req0->vlan_cfg = is_kill;
3576
3577 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
3578 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
3579 else
3580 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
3581
3582 ret = hclge_cmd_send(&hdev->hw, desc, 2);
3583 if (ret) {
3584 dev_err(&hdev->pdev->dev,
3585 "Send vf vlan command fail, ret =%d.\n",
3586 ret);
3587 return ret;
3588 }
3589
3590 if (!is_kill) {
3591 if (!req0->resp_code || req0->resp_code == 1)
3592 return 0;
3593
3594 dev_err(&hdev->pdev->dev,
3595 "Add vf vlan filter fail, ret =%d.\n",
3596 req0->resp_code);
3597 } else {
3598 if (!req0->resp_code)
3599 return 0;
3600
3601 dev_err(&hdev->pdev->dev,
3602 "Kill vf vlan filter fail, ret =%d.\n",
3603 req0->resp_code);
3604 }
3605
3606 return -EIO;
3607}
3608
3609static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
3610 __be16 proto, u16 vlan_id,
3611 bool is_kill)
3612{
3613 struct hclge_vport *vport = hclge_get_vport(handle);
3614 struct hclge_dev *hdev = vport->back;
3615 struct hclge_vlan_filter_pf_cfg *req;
3616 struct hclge_desc desc;
3617 u8 vlan_offset_byte_val;
3618 u8 vlan_offset_byte;
3619 u8 vlan_offset_160;
3620 int ret;
3621
3622 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
3623
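	/* the port VLAN table is programmed 160 VLANs per command: select the
	 * 160-entry window, then the byte and bit inside it
	 */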
3624 vlan_offset_160 = vlan_id / 160;
3625 vlan_offset_byte = (vlan_id % 160) / 8;
3626 vlan_offset_byte_val = 1 << (vlan_id % 8);
3627
3628 req = (struct hclge_vlan_filter_pf_cfg *)desc.data;
3629 req->vlan_offset = vlan_offset_160;
3630 req->vlan_cfg = is_kill;
3631 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
3632
3633 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3634 if (ret) {
3635 dev_err(&hdev->pdev->dev,
3636 "port vlan command, send fail, ret =%d.\n",
3637 ret);
3638 return ret;
3639 }
3640
3641 ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
3642 if (ret) {
3643 dev_err(&hdev->pdev->dev,
3644 "Set pf vlan filter config fail, ret =%d.\n",
3645 ret);
3646 return -EIO;
3647 }
3648
3649 return 0;
3650}
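The PF VLAN table is addressed in blocks of 160 VLAN ids per command: vlan_offset picks the block, and the id's bit is then set inside a 160-bit (20-byte) bitmap. A short user-space recomputation of that arithmetic (illustrative sketch, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int vlan_ids[] = { 1, 100, 3000 };
	unsigned int i;

	for (i = 0; i < sizeof(vlan_ids) / sizeof(vlan_ids[0]); i++) {
		unsigned int vid = vlan_ids[i];
		unsigned int block = vid / 160;		/* vlan_offset */
		unsigned int byte = (vid % 160) / 8;	/* bitmap byte */
		unsigned int bit = 1 << (vid % 8);	/* bit within byte */

		printf("vlan %4u -> block %2u, byte %2u, bit 0x%02x\n",
		       vid, block, byte, bit);
	}
	return 0;
}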
3651
3652static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
3653 u16 vlan, u8 qos, __be16 proto)
3654{
3655 struct hclge_vport *vport = hclge_get_vport(handle);
3656 struct hclge_dev *hdev = vport->back;
3657
3658 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
3659 return -EINVAL;
3660 if (proto != htons(ETH_P_8021Q))
3661 return -EPROTONOSUPPORT;
3662
3663 return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
3664}
3665
3666static int hclge_init_vlan_config(struct hclge_dev *hdev)
3667{
3668#define HCLGE_VLAN_TYPE_VF_TABLE 0
3669#define HCLGE_VLAN_TYPE_PORT_TABLE 1
3670 int ret;
3671
3672 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
3673 true);
3674 if (ret)
3675 return ret;
3676
3677 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
3678 true);
3679
3680 return ret;
3681}
3682
3683static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
3684{
3685 struct hclge_vport *vport = hclge_get_vport(handle);
3686 struct hclge_config_max_frm_size *req;
3687 struct hclge_dev *hdev = vport->back;
3688 struct hclge_desc desc;
3689 int ret;
3690
3691 if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
3692 return -EINVAL;
3693
3694 hdev->mps = new_mtu;
3695 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
3696
3697 req = (struct hclge_config_max_frm_size *)desc.data;
3698 req->max_frm_size = cpu_to_le16(new_mtu);
3699
3700 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3701 if (ret) {
3702 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
3703 return ret;
3704 }
3705
3706 return 0;
3707}
3708
3709static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
3710 bool enable)
3711{
3712 struct hclge_reset_tqp_queue *req;
3713 struct hclge_desc desc;
3714 int ret;
3715
3716 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
3717
3718 req = (struct hclge_reset_tqp_queue *)desc.data;
3719 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
3720 hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
3721
3722 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3723 if (ret) {
3724 dev_err(&hdev->pdev->dev,
3725 "Send tqp reset cmd error, status =%d\n", ret);
3726 return ret;
3727 }
3728
3729 return 0;
3730}
3731
3732static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
3733{
3734 struct hclge_reset_tqp_queue *req;
3735 struct hclge_desc desc;
3736 int ret;
3737
3738 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
3739
3740 req = (struct hclge_reset_tqp_queue *)desc.data;
3741 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
3742
3743 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3744 if (ret) {
3745 dev_err(&hdev->pdev->dev,
3746 "Get reset status error, status =%d\n", ret);
3747 return ret;
3748 }
3749
3750 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
3751}
3752
3753static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
3754{
3755 struct hclge_vport *vport = hclge_get_vport(handle);
3756 struct hclge_dev *hdev = vport->back;
3757 int reset_try_times = 0;
3758 int reset_status;
3759 int ret;
3760
3761 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
3762 if (ret) {
3763 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
3764 return;
3765 }
3766
3767 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
3768 if (ret) {
3769 dev_warn(&hdev->pdev->dev,
3770 "Send reset tqp cmd fail, ret = %d\n", ret);
3771 return;
3772 }
3773
3774 reset_try_times = 0;
3775 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
3776 /* Wait for tqp hw reset */
3777 msleep(20);
3778 reset_status = hclge_get_reset_status(hdev, queue_id);
3779 if (reset_status)
3780 break;
3781 }
3782
3783 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
3784 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
3785 return;
3786 }
3787
3788 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
3789 if (ret) {
3790 dev_warn(&hdev->pdev->dev,
3791 "Deassert the soft reset fail, ret = %d\n", ret);
3792 return;
3793 }
3794}
3795
3796static u32 hclge_get_fw_version(struct hnae3_handle *handle)
3797{
3798 struct hclge_vport *vport = hclge_get_vport(handle);
3799 struct hclge_dev *hdev = vport->back;
3800
3801 return hdev->fw_version;
3802}
3803
3804static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
3805 u32 *rx_en, u32 *tx_en)
3806{
3807 struct hclge_vport *vport = hclge_get_vport(handle);
3808 struct hclge_dev *hdev = vport->back;
3809
3810 *auto_neg = hclge_get_autoneg(handle);
3811
3812 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
3813 *rx_en = 0;
3814 *tx_en = 0;
3815 return;
3816 }
3817
3818 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
3819 *rx_en = 1;
3820 *tx_en = 0;
3821 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
3822 *tx_en = 1;
3823 *rx_en = 0;
3824 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
3825 *rx_en = 1;
3826 *tx_en = 1;
3827 } else {
3828 *rx_en = 0;
3829 *tx_en = 0;
3830 }
3831}
3832
3833static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
3834 u8 *auto_neg, u32 *speed, u8 *duplex)
3835{
3836 struct hclge_vport *vport = hclge_get_vport(handle);
3837 struct hclge_dev *hdev = vport->back;
3838
3839 if (speed)
3840 *speed = hdev->hw.mac.speed;
3841 if (duplex)
3842 *duplex = hdev->hw.mac.duplex;
3843 if (auto_neg)
3844 *auto_neg = hdev->hw.mac.autoneg;
3845}
3846
3847static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
3848{
3849 struct hclge_vport *vport = hclge_get_vport(handle);
3850 struct hclge_dev *hdev = vport->back;
3851
3852 if (media_type)
3853 *media_type = hdev->hw.mac.media_type;
3854}
3855
3856static void hclge_get_mdix_mode(struct hnae3_handle *handle,
3857 u8 *tp_mdix_ctrl, u8 *tp_mdix)
3858{
3859 struct hclge_vport *vport = hclge_get_vport(handle);
3860 struct hclge_dev *hdev = vport->back;
3861 struct phy_device *phydev = hdev->hw.mac.phydev;
3862 int mdix_ctrl, mdix, retval, is_resolved;
3863
3864 if (!phydev) {
3865 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
3866 *tp_mdix = ETH_TP_MDI_INVALID;
3867 return;
3868 }
3869
3870 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
3871
3872 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
3873 mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
3874 HCLGE_PHY_MDIX_CTRL_S);
3875
3876 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
3877 mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
3878 is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
3879
3880 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
3881
3882 switch (mdix_ctrl) {
3883 case 0x0:
3884 *tp_mdix_ctrl = ETH_TP_MDI;
3885 break;
3886 case 0x1:
3887 *tp_mdix_ctrl = ETH_TP_MDI_X;
3888 break;
3889 case 0x3:
3890 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
3891 break;
3892 default:
3893 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
3894 break;
3895 }
3896
3897 if (!is_resolved)
3898 *tp_mdix = ETH_TP_MDI_INVALID;
3899 else if (mdix)
3900 *tp_mdix = ETH_TP_MDI_X;
3901 else
3902 *tp_mdix = ETH_TP_MDI;
3903}
3904
3905static int hclge_init_client_instance(struct hnae3_client *client,
3906 struct hnae3_ae_dev *ae_dev)
3907{
3908 struct hclge_dev *hdev = ae_dev->priv;
3909 struct hclge_vport *vport;
3910 int i, ret;
3911
3912 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3913 vport = &hdev->vport[i];
3914
3915 switch (client->type) {
3916 case HNAE3_CLIENT_KNIC:
3917
3918 hdev->nic_client = client;
3919 vport->nic.client = client;
3920 ret = client->ops->init_instance(&vport->nic);
3921 if (ret)
3922 goto err;
3923
3924 if (hdev->roce_client &&
3925 hnae_get_bit(hdev->ae_dev->flag,
3926 HNAE_DEV_SUPPORT_ROCE_B)) {
3927 struct hnae3_client *rc = hdev->roce_client;
3928
3929 ret = hclge_init_roce_base_info(vport);
3930 if (ret)
3931 goto err;
3932
3933 ret = rc->ops->init_instance(&vport->roce);
3934 if (ret)
3935 goto err;
3936 }
3937
3938 break;
3939 case HNAE3_CLIENT_UNIC:
3940 hdev->nic_client = client;
3941 vport->nic.client = client;
3942
3943 ret = client->ops->init_instance(&vport->nic);
3944 if (ret)
3945 goto err;
3946
3947 break;
3948 case HNAE3_CLIENT_ROCE:
3949 if (hnae_get_bit(hdev->ae_dev->flag,
3950 HNAE_DEV_SUPPORT_ROCE_B)) {
3951 hdev->roce_client = client;
3952 vport->roce.client = client;
3953 }
3954
3955 if (hdev->roce_client) {
3956 ret = hclge_init_roce_base_info(vport);
3957 if (ret)
3958 goto err;
3959
3960 ret = client->ops->init_instance(&vport->roce);
3961 if (ret)
3962 goto err;
3963 }
3964 }
3965 }
3966
3967 return 0;
3968err:
3969 return ret;
3970}
3971
3972static void hclge_uninit_client_instance(struct hnae3_client *client,
3973 struct hnae3_ae_dev *ae_dev)
3974{
3975 struct hclge_dev *hdev = ae_dev->priv;
3976 struct hclge_vport *vport;
3977 int i;
3978
3979 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3980 vport = &hdev->vport[i];
3981 if (hdev->roce_client)
3982 hdev->roce_client->ops->uninit_instance(&vport->roce,
3983 0);
3984 if (client->type == HNAE3_CLIENT_ROCE)
3985 return;
3986 if (client->ops->uninit_instance)
3987 client->ops->uninit_instance(&vport->nic, 0);
3988 }
3989}
3990
3991static int hclge_pci_init(struct hclge_dev *hdev)
3992{
3993 struct pci_dev *pdev = hdev->pdev;
3994 struct hclge_hw *hw;
3995 int ret;
3996
3997 ret = pci_enable_device(pdev);
3998 if (ret) {
3999 dev_err(&pdev->dev, "failed to enable PCI device\n");
4000 goto err_no_drvdata;
4001 }
4002
4003 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4004 if (ret) {
4005 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4006 if (ret) {
4007			dev_err(&pdev->dev,
4008				"can't set consistent PCI DMA\n");
4009 goto err_disable_device;
4010 }
4011 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
4012 }
4013
4014 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
4015 if (ret) {
4016 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
4017 goto err_disable_device;
4018 }
4019
4020 pci_set_master(pdev);
4021 hw = &hdev->hw;
4022 hw->back = hdev;
4023 hw->io_base = pcim_iomap(pdev, 2, 0);
4024 if (!hw->io_base) {
4025 dev_err(&pdev->dev, "Can't map configuration register space\n");
4026 ret = -ENOMEM;
4027 goto err_clr_master;
4028 }
4029
4030 return 0;
4031err_clr_master:
4032 pci_clear_master(pdev);
4033 pci_release_regions(pdev);
4034err_disable_device:
4035 pci_disable_device(pdev);
4036err_no_drvdata:
4037 pci_set_drvdata(pdev, NULL);
4038
4039 return ret;
4040}
4041
4042static void hclge_pci_uninit(struct hclge_dev *hdev)
4043{
4044 struct pci_dev *pdev = hdev->pdev;
4045
4046 if (hdev->flag & HCLGE_FLAG_USE_MSIX) {
4047 pci_disable_msix(pdev);
4048 devm_kfree(&pdev->dev, hdev->msix_entries);
4049 hdev->msix_entries = NULL;
4050 } else {
4051 pci_disable_msi(pdev);
4052 }
4053
4054 pci_clear_master(pdev);
4055 pci_release_mem_regions(pdev);
4056 pci_disable_device(pdev);
4057}
4058
4059static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
4060{
4061 struct pci_dev *pdev = ae_dev->pdev;
4062 const struct pci_device_id *id;
4063 struct hclge_dev *hdev;
4064 int ret;
4065
4066 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
4067 if (!hdev) {
4068 ret = -ENOMEM;
4069 goto err_hclge_dev;
4070 }
4071
4072 hdev->flag |= HCLGE_FLAG_USE_MSIX;
4073 hdev->pdev = pdev;
4074 hdev->ae_dev = ae_dev;
4075 ae_dev->priv = hdev;
4076
4077 id = pci_match_id(roce_pci_tbl, ae_dev->pdev);
4078 if (id)
4079 hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1);
4080
4081 ret = hclge_pci_init(hdev);
4082 if (ret) {
4083 dev_err(&pdev->dev, "PCI init failed\n");
4084 goto err_pci_init;
4085 }
4086
4087 /* Command queue initialize */
4088 ret = hclge_cmd_init(hdev);
4089 if (ret)
4090 goto err_cmd_init;
4091
4092 ret = hclge_get_cap(hdev);
4093 if (ret) {
4094		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", ret);
4095 return ret;
4096 }
4097
4098 ret = hclge_configure(hdev);
4099 if (ret) {
4100 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
4101 return ret;
4102 }
4103
4104 if (hdev->flag & HCLGE_FLAG_USE_MSIX)
4105 ret = hclge_init_msix(hdev);
4106 else
4107 ret = hclge_init_msi(hdev);
4108 if (ret) {
4109 dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret);
4110 return ret;
4111 }
4112
4113 ret = hclge_alloc_tqps(hdev);
4114 if (ret) {
4115 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
4116 return ret;
4117 }
4118
4119 ret = hclge_alloc_vport(hdev);
4120 if (ret) {
4121 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
4122 return ret;
4123 }
4124
4125 ret = hclge_mac_init(hdev);
4126 if (ret) {
4127 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
4128 return ret;
4129 }
4130 ret = hclge_buffer_alloc(hdev);
4131 if (ret) {
4132 dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
4133 return ret;
4134 }
4135
4136 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
4137 if (ret) {
4138 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
4139 return ret;
4140 }
4141
4142 ret = hclge_rss_init_hw(hdev);
4143 if (ret) {
4144 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
4145 return ret;
4146 }
4147
4148 ret = hclge_init_vlan_config(hdev);
4149 if (ret) {
4150 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
4151 return ret;
4152 }
4153
4154 ret = hclge_tm_schd_init(hdev);
4155 if (ret) {
4156 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
4157 return ret;
4158 }
4159
4160 setup_timer(&hdev->service_timer, hclge_service_timer,
4161 (unsigned long)hdev);
4162 INIT_WORK(&hdev->service_task, hclge_service_task);
4163
4164 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
4165 set_bit(HCLGE_STATE_DOWN, &hdev->state);
4166
4167 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
4168 return 0;
4169
4170err_cmd_init:
4171 pci_release_regions(pdev);
4172err_pci_init:
4173 pci_set_drvdata(pdev, NULL);
4174err_hclge_dev:
4175 return ret;
4176}
4177
4178static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
4179{
4180 struct hclge_dev *hdev = ae_dev->priv;
4181 struct hclge_mac *mac = &hdev->hw.mac;
4182
4183 set_bit(HCLGE_STATE_DOWN, &hdev->state);
4184
4185#ifdef CONFIG_PCI_IOV
4186 hclge_disable_sriov(hdev);
4187#endif
4188
4189 if (hdev->service_timer.data)
4190 del_timer_sync(&hdev->service_timer);
4191 if (hdev->service_task.func)
4192 cancel_work_sync(&hdev->service_task);
4193
4194 if (mac->phydev)
4195 mdiobus_unregister(mac->mdio_bus);
4196
4197 hclge_destroy_cmd_queue(&hdev->hw);
4198 hclge_pci_uninit(hdev);
4199 ae_dev->priv = NULL;
4200}
4201
4202static const struct hnae3_ae_ops hclge_ops = {
4203 .init_ae_dev = hclge_init_ae_dev,
4204 .uninit_ae_dev = hclge_uninit_ae_dev,
4205 .init_client_instance = hclge_init_client_instance,
4206 .uninit_client_instance = hclge_uninit_client_instance,
4207 .map_ring_to_vector = hclge_map_handle_ring_to_vector,
4208 .unmap_ring_from_vector = hclge_unmap_ring_from_vector,
4209 .get_vector = hclge_get_vector,
4210 .set_promisc_mode = hclge_set_promisc_mode,
4211 .start = hclge_ae_start,
4212 .stop = hclge_ae_stop,
4213 .get_status = hclge_get_status,
4214 .get_ksettings_an_result = hclge_get_ksettings_an_result,
4215 .update_speed_duplex_h = hclge_update_speed_duplex_h,
4216 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
4217 .get_media_type = hclge_get_media_type,
4218 .get_rss_key_size = hclge_get_rss_key_size,
4219 .get_rss_indir_size = hclge_get_rss_indir_size,
4220 .get_rss = hclge_get_rss,
4221 .set_rss = hclge_set_rss,
4222 .get_tc_size = hclge_get_tc_size,
4223 .get_mac_addr = hclge_get_mac_addr,
4224 .set_mac_addr = hclge_set_mac_addr,
4225 .add_uc_addr = hclge_add_uc_addr,
4226 .rm_uc_addr = hclge_rm_uc_addr,
4227 .add_mc_addr = hclge_add_mc_addr,
4228 .rm_mc_addr = hclge_rm_mc_addr,
4229 .set_autoneg = hclge_set_autoneg,
4230 .get_autoneg = hclge_get_autoneg,
4231 .get_pauseparam = hclge_get_pauseparam,
4232 .set_mtu = hclge_set_mtu,
4233 .reset_queue = hclge_reset_tqp,
4234 .get_stats = hclge_get_stats,
4235 .update_stats = hclge_update_stats,
4236 .get_strings = hclge_get_strings,
4237 .get_sset_count = hclge_get_sset_count,
4238 .get_fw_version = hclge_get_fw_version,
4239 .get_mdix_mode = hclge_get_mdix_mode,
4240 .set_vlan_filter = hclge_set_port_vlan_filter,
4241 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
4242};
4243
4244static struct hnae3_ae_algo ae_algo = {
4245 .ops = &hclge_ops,
4246 .name = HCLGE_NAME,
4247 .pdev_id_table = ae_algo_pci_tbl,
4248};
4249
4250static int hclge_init(void)
4251{
4252 pr_info("%s is initializing\n", HCLGE_NAME);
4253
4254 return hnae3_register_ae_algo(&ae_algo);
4255}
4256
4257static void hclge_exit(void)
4258{
4259 hnae3_unregister_ae_algo(&ae_algo);
4260}
4261module_init(hclge_init);
4262module_exit(hclge_exit);
4263
4264MODULE_LICENSE("GPL");
4265MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4266MODULE_DESCRIPTION("HCLGE Driver");
4267MODULE_VERSION(HCLGE_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
new file mode 100644
index 000000000000..edb10ad075eb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -0,0 +1,519 @@
1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef __HCLGE_MAIN_H
11#define __HCLGE_MAIN_H
12#include <linux/fs.h>
13#include <linux/types.h>
14#include <linux/phy.h>
15#include "hclge_cmd.h"
16#include "hnae3.h"
17
18#define HCLGE_MOD_VERSION "v1.0"
19#define HCLGE_DRIVER_NAME "hclge"
20
21#define HCLGE_INVALID_VPORT 0xffff
22
23#define HCLGE_ROCE_VECTOR_OFFSET 96
24
25#define HCLGE_PF_CFG_BLOCK_SIZE 32
26#define HCLGE_PF_CFG_DESC_NUM \
27 (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
28
29#define HCLGE_VECTOR_REG_BASE 0x20000
30
31#define HCLGE_VECTOR_REG_OFFSET 0x4
32#define HCLGE_VECTOR_VF_OFFSET 0x100000
33
34#define HCLGE_RSS_IND_TBL_SIZE 512
35#define HCLGE_RSS_SET_BITMAP_MSK 0xffff
36#define HCLGE_RSS_KEY_SIZE 40
37#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
38#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
39#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
40#define HCLGE_RSS_HASH_ALGO_MASK 0xf
41#define HCLGE_RSS_CFG_TBL_NUM \
42 (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
43
44#define HCLGE_RSS_TC_SIZE_0 1
45#define HCLGE_RSS_TC_SIZE_1 2
46#define HCLGE_RSS_TC_SIZE_2 4
47#define HCLGE_RSS_TC_SIZE_3 8
48#define HCLGE_RSS_TC_SIZE_4 16
49#define HCLGE_RSS_TC_SIZE_5 32
50#define HCLGE_RSS_TC_SIZE_6 64
51#define HCLGE_RSS_TC_SIZE_7 128
52
53#define HCLGE_TQP_RESET_TRY_TIMES 10
54
55#define HCLGE_PHY_PAGE_MDIX 0
56#define HCLGE_PHY_PAGE_COPPER 0
57
58/* Page Selection Reg. */
59#define HCLGE_PHY_PAGE_REG 22
60
61/* Copper Specific Control Register */
62#define HCLGE_PHY_CSC_REG 16
63
64/* Copper Specific Status Register */
65#define HCLGE_PHY_CSS_REG 17
66
67#define HCLGE_PHY_MDIX_CTRL_S (5)
68#define HCLGE_PHY_MDIX_CTRL_M (3 << HCLGE_PHY_MDIX_CTRL_S)
69
70#define HCLGE_PHY_MDIX_STATUS_B (6)
71#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11)
72
73enum HCLGE_DEV_STATE {
74 HCLGE_STATE_REINITING,
75 HCLGE_STATE_DOWN,
76 HCLGE_STATE_DISABLED,
77 HCLGE_STATE_REMOVING,
78 HCLGE_STATE_SERVICE_INITED,
79 HCLGE_STATE_SERVICE_SCHED,
80 HCLGE_STATE_MBX_HANDLING,
81 HCLGE_STATE_MBX_IRQ,
82 HCLGE_STATE_MAX
83};
84
85#define HCLGE_MPF_ENBALE 1
86struct hclge_caps {
87 u16 num_tqp;
88 u16 num_buffer_cell;
89 u32 flag;
90 u16 vmdq;
91};
92
93enum HCLGE_MAC_SPEED {
94 HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
95 HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */
96 HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */
97 HCLGE_MAC_SPEED_10G = 10000, /* 10000 Mbps = 10 Gbps */
98 HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */
99 HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */
100 HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */
101 HCLGE_MAC_SPEED_100G = 100000 /* 100000 Mbps = 100 Gbps */
102};
103
104enum HCLGE_MAC_DUPLEX {
105 HCLGE_MAC_HALF,
106 HCLGE_MAC_FULL
107};
108
109enum hclge_mta_dmac_sel_type {
110 HCLGE_MAC_ADDR_47_36,
111 HCLGE_MAC_ADDR_46_35,
112 HCLGE_MAC_ADDR_45_34,
113 HCLGE_MAC_ADDR_44_33,
114};
115
116struct hclge_mac {
117 u8 phy_addr;
118 u8 flag;
119 u8 media_type;
120 u8 mac_addr[ETH_ALEN];
121 u8 autoneg;
122 u8 duplex;
123 u32 speed;
124	int link;	/* store the link status of mac & phy (if phy exists) */
125 struct phy_device *phydev;
126 struct mii_bus *mdio_bus;
127 phy_interface_t phy_if;
128};
129
130struct hclge_hw {
131 void __iomem *io_base;
132 struct hclge_mac mac;
133 int num_vec;
134 struct hclge_cmq cmq;
135 struct hclge_caps caps;
136 void *back;
137};
138
139/* TQP stats */
140struct hlcge_tqp_stats {
141	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
142 u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
143	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
144 u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
145};
146
147struct hclge_tqp {
148 struct device *dev; /* Device for DMA mapping */
149 struct hnae3_queue q;
150 struct hlcge_tqp_stats tqp_stats;
151 u16 index; /* Global index in a NIC controller */
152
153 bool alloced;
154};
155
156enum hclge_fc_mode {
157 HCLGE_FC_NONE,
158 HCLGE_FC_RX_PAUSE,
159 HCLGE_FC_TX_PAUSE,
160 HCLGE_FC_FULL,
161 HCLGE_FC_PFC,
162 HCLGE_FC_DEFAULT
163};
164
165#define HCLGE_PG_NUM 4
166#define HCLGE_SCH_MODE_SP 0
167#define HCLGE_SCH_MODE_DWRR 1
168struct hclge_pg_info {
169 u8 pg_id;
170 u8 pg_sch_mode; /* 0: sp; 1: dwrr */
171 u8 tc_bit_map;
172 u32 bw_limit;
173 u8 tc_dwrr[HNAE3_MAX_TC];
174};
175
176struct hclge_tc_info {
177 u8 tc_id;
178 u8 tc_sch_mode; /* 0: sp; 1: dwrr */
179 u8 up;
180 u8 pgid;
181 u32 bw_limit;
182};
183
184struct hclge_cfg {
185 u8 vmdq_vport_num;
186 u8 tc_num;
187 u16 tqp_desc_num;
188 u16 rx_buf_len;
189 u8 phy_addr;
190 u8 media_type;
191 u8 mac_addr[ETH_ALEN];
192 u8 default_speed;
193 u32 numa_node_map;
194};
195
196struct hclge_tm_info {
197 u8 num_tc;
198 u8 num_pg; /* It must be 1 if vNET-Base schd */
199 u8 pg_dwrr[HCLGE_PG_NUM];
200 struct hclge_pg_info pg_info[HCLGE_PG_NUM];
201 struct hclge_tc_info tc_info[HNAE3_MAX_TC];
202 enum hclge_fc_mode fc_mode;
203 u8 hw_pfc_map; /* Allow for packet drop or not on this TC */
204};
205
206struct hclge_comm_stats_str {
207 char desc[ETH_GSTRING_LEN];
208 unsigned long offset;
209};
210
211/* all 64bit stats, opcode id: 0x0030 */
212struct hclge_64_bit_stats {
213 /* query_igu_stat */
214 u64 igu_rx_oversize_pkt;
215 u64 igu_rx_undersize_pkt;
216 u64 igu_rx_out_all_pkt;
217 u64 igu_rx_uni_pkt;
218 u64 igu_rx_multi_pkt;
219 u64 igu_rx_broad_pkt;
220 u64 rsv0;
221
222 /* query_egu_stat */
223 u64 egu_tx_out_all_pkt;
224 u64 egu_tx_uni_pkt;
225 u64 egu_tx_multi_pkt;
226 u64 egu_tx_broad_pkt;
227
228 /* ssu_ppp packet stats */
229 u64 ssu_ppp_mac_key_num;
230 u64 ssu_ppp_host_key_num;
231 u64 ppp_ssu_mac_rlt_num;
232 u64 ppp_ssu_host_rlt_num;
233
234 /* ssu_tx_in_out_dfx_stats */
235 u64 ssu_tx_in_num;
236 u64 ssu_tx_out_num;
237 /* ssu_rx_in_out_dfx_stats */
238 u64 ssu_rx_in_num;
239 u64 ssu_rx_out_num;
240};
241
242/* all 32bit stats, opcode id: 0x0031 */
243struct hclge_32_bit_stats {
244 u64 igu_rx_err_pkt;
245 u64 igu_rx_no_eof_pkt;
246 u64 igu_rx_no_sof_pkt;
247 u64 egu_tx_1588_pkt;
248 u64 egu_tx_err_pkt;
249 u64 ssu_full_drop_num;
250 u64 ssu_part_drop_num;
251 u64 ppp_key_drop_num;
252 u64 ppp_rlt_drop_num;
253 u64 ssu_key_drop_num;
254 u64 pkt_curr_buf_cnt;
255 u64 qcn_fb_rcv_cnt;
256 u64 qcn_fb_drop_cnt;
257 u64 qcn_fb_invaild_cnt;
258 u64 rsv0;
259 u64 rx_packet_tc0_in_cnt;
260 u64 rx_packet_tc1_in_cnt;
261 u64 rx_packet_tc2_in_cnt;
262 u64 rx_packet_tc3_in_cnt;
263 u64 rx_packet_tc4_in_cnt;
264 u64 rx_packet_tc5_in_cnt;
265 u64 rx_packet_tc6_in_cnt;
266 u64 rx_packet_tc7_in_cnt;
267 u64 rx_packet_tc0_out_cnt;
268 u64 rx_packet_tc1_out_cnt;
269 u64 rx_packet_tc2_out_cnt;
270 u64 rx_packet_tc3_out_cnt;
271 u64 rx_packet_tc4_out_cnt;
272 u64 rx_packet_tc5_out_cnt;
273 u64 rx_packet_tc6_out_cnt;
274 u64 rx_packet_tc7_out_cnt;
275
276 /* Tx packet level statistics */
277 u64 tx_packet_tc0_in_cnt;
278 u64 tx_packet_tc1_in_cnt;
279 u64 tx_packet_tc2_in_cnt;
280 u64 tx_packet_tc3_in_cnt;
281 u64 tx_packet_tc4_in_cnt;
282 u64 tx_packet_tc5_in_cnt;
283 u64 tx_packet_tc6_in_cnt;
284 u64 tx_packet_tc7_in_cnt;
285 u64 tx_packet_tc0_out_cnt;
286 u64 tx_packet_tc1_out_cnt;
287 u64 tx_packet_tc2_out_cnt;
288 u64 tx_packet_tc3_out_cnt;
289 u64 tx_packet_tc4_out_cnt;
290 u64 tx_packet_tc5_out_cnt;
291 u64 tx_packet_tc6_out_cnt;
292 u64 tx_packet_tc7_out_cnt;
293
294 /* packet buffer statistics */
295 u64 pkt_curr_buf_tc0_cnt;
296 u64 pkt_curr_buf_tc1_cnt;
297 u64 pkt_curr_buf_tc2_cnt;
298 u64 pkt_curr_buf_tc3_cnt;
299 u64 pkt_curr_buf_tc4_cnt;
300 u64 pkt_curr_buf_tc5_cnt;
301 u64 pkt_curr_buf_tc6_cnt;
302 u64 pkt_curr_buf_tc7_cnt;
303
304 u64 mb_uncopy_num;
305 u64 lo_pri_unicast_rlt_drop_num;
306 u64 hi_pri_multicast_rlt_drop_num;
307 u64 lo_pri_multicast_rlt_drop_num;
308 u64 rx_oq_drop_pkt_cnt;
309 u64 tx_oq_drop_pkt_cnt;
310 u64 nic_l2_err_drop_pkt_cnt;
311 u64 roc_l2_err_drop_pkt_cnt;
312};
313
314/* mac stats, opcode id: 0x0032 */
315struct hclge_mac_stats {
316 u64 mac_tx_mac_pause_num;
317 u64 mac_rx_mac_pause_num;
318 u64 mac_tx_pfc_pri0_pkt_num;
319 u64 mac_tx_pfc_pri1_pkt_num;
320 u64 mac_tx_pfc_pri2_pkt_num;
321 u64 mac_tx_pfc_pri3_pkt_num;
322 u64 mac_tx_pfc_pri4_pkt_num;
323 u64 mac_tx_pfc_pri5_pkt_num;
324 u64 mac_tx_pfc_pri6_pkt_num;
325 u64 mac_tx_pfc_pri7_pkt_num;
326 u64 mac_rx_pfc_pri0_pkt_num;
327 u64 mac_rx_pfc_pri1_pkt_num;
328 u64 mac_rx_pfc_pri2_pkt_num;
329 u64 mac_rx_pfc_pri3_pkt_num;
330 u64 mac_rx_pfc_pri4_pkt_num;
331 u64 mac_rx_pfc_pri5_pkt_num;
332 u64 mac_rx_pfc_pri6_pkt_num;
333 u64 mac_rx_pfc_pri7_pkt_num;
334 u64 mac_tx_total_pkt_num;
335 u64 mac_tx_total_oct_num;
336 u64 mac_tx_good_pkt_num;
337 u64 mac_tx_bad_pkt_num;
338 u64 mac_tx_good_oct_num;
339 u64 mac_tx_bad_oct_num;
340 u64 mac_tx_uni_pkt_num;
341 u64 mac_tx_multi_pkt_num;
342 u64 mac_tx_broad_pkt_num;
343 u64 mac_tx_undersize_pkt_num;
344 u64 mac_tx_overrsize_pkt_num;
345 u64 mac_tx_64_oct_pkt_num;
346 u64 mac_tx_65_127_oct_pkt_num;
347 u64 mac_tx_128_255_oct_pkt_num;
348 u64 mac_tx_256_511_oct_pkt_num;
349 u64 mac_tx_512_1023_oct_pkt_num;
350 u64 mac_tx_1024_1518_oct_pkt_num;
351 u64 mac_tx_1519_max_oct_pkt_num;
352 u64 mac_rx_total_pkt_num;
353 u64 mac_rx_total_oct_num;
354 u64 mac_rx_good_pkt_num;
355 u64 mac_rx_bad_pkt_num;
356 u64 mac_rx_good_oct_num;
357 u64 mac_rx_bad_oct_num;
358 u64 mac_rx_uni_pkt_num;
359 u64 mac_rx_multi_pkt_num;
360 u64 mac_rx_broad_pkt_num;
361 u64 mac_rx_undersize_pkt_num;
362 u64 mac_rx_overrsize_pkt_num;
363 u64 mac_rx_64_oct_pkt_num;
364 u64 mac_rx_65_127_oct_pkt_num;
365 u64 mac_rx_128_255_oct_pkt_num;
366 u64 mac_rx_256_511_oct_pkt_num;
367 u64 mac_rx_512_1023_oct_pkt_num;
368 u64 mac_rx_1024_1518_oct_pkt_num;
369 u64 mac_rx_1519_max_oct_pkt_num;
370
371 u64 mac_trans_fragment_pkt_num;
372 u64 mac_trans_undermin_pkt_num;
373 u64 mac_trans_jabber_pkt_num;
374 u64 mac_trans_err_all_pkt_num;
375 u64 mac_trans_from_app_good_pkt_num;
376 u64 mac_trans_from_app_bad_pkt_num;
377 u64 mac_rcv_fragment_pkt_num;
378 u64 mac_rcv_undermin_pkt_num;
379 u64 mac_rcv_jabber_pkt_num;
380 u64 mac_rcv_fcs_err_pkt_num;
381 u64 mac_rcv_send_app_good_pkt_num;
382 u64 mac_rcv_send_app_bad_pkt_num;
383};
384
385struct hclge_hw_stats {
386 struct hclge_mac_stats mac_stats;
387 struct hclge_64_bit_stats all_64_bit_stats;
388 struct hclge_32_bit_stats all_32_bit_stats;
389};
390
391struct hclge_dev {
392 struct pci_dev *pdev;
393 struct hnae3_ae_dev *ae_dev;
394 struct hclge_hw hw;
395 struct hclge_hw_stats hw_stats;
396 unsigned long state;
397
398 u32 fw_version;
399 u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
400 u16 num_tqps; /* Num task queue pairs of this PF */
401 u16 num_req_vfs; /* Num VFs requested for this PF */
402
403 u16 num_roce_msix; /* Num of roce vectors for this PF */
404 int roce_base_vector;
405
406 /* Base task tqp physical id of this PF */
407 u16 base_tqp_pid;
408 u16 alloc_rss_size; /* Allocated RSS task queue */
409 u16 rss_size_max; /* HW defined max RSS task queue */
410
411 /* Num of guaranteed filters for this PF */
412 u16 fdir_pf_filter_count;
413 u16 num_alloc_vport; /* Num vports this driver supports */
414 u32 numa_node_mask;
415 u16 rx_buf_len;
416 u16 num_desc;
417 u8 hw_tc_map;
418 u8 tc_num_last_time;
419 enum hclge_fc_mode fc_mode_last_time;
420
421#define HCLGE_FLAG_TC_BASE_SCH_MODE 1
422#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2
423 u8 tx_sch_mode;
424
425 u8 default_up;
426 struct hclge_tm_info tm_info;
427
428 u16 num_msi;
429 u16 num_msi_left;
430 u16 num_msi_used;
431 u32 base_msi_vector;
432 struct msix_entry *msix_entries;
433 u16 *vector_status;
434
435 u16 pending_udp_bitmap;
436
437 u16 rx_itr_default;
438 u16 tx_itr_default;
439
440 u16 adminq_work_limit; /* Num of admin receive queue desc to process */
441 unsigned long service_timer_period;
442 unsigned long service_timer_previous;
443 struct timer_list service_timer;
444 struct work_struct service_task;
445
446 bool cur_promisc;
447 int num_alloc_vfs; /* Actual number of VFs allocated */
448
449 struct hclge_tqp *htqp;
450 struct hclge_vport *vport;
451
452 struct dentry *hclge_dbgfs;
453
454 struct hnae3_client *nic_client;
455 struct hnae3_client *roce_client;
456
457#define HCLGE_FLAG_USE_MSI 0x00000001
458#define HCLGE_FLAG_USE_MSIX 0x00000002
459#define HCLGE_FLAG_MAIN 0x00000004
460#define HCLGE_FLAG_DCB_CAPABLE 0x00000008
461#define HCLGE_FLAG_DCB_ENABLE 0x00000010
462 u32 flag;
463
464 u32 pkt_buf_size; /* Total pf buf size for tx/rx */
465 u32 mps; /* Max packet size */
466 struct hclge_priv_buf *priv_buf;
467 struct hclge_shared_buf s_buf;
468
469 enum hclge_mta_dmac_sel_type mta_mac_sel_type;
470	bool enable_mta; /* Multicast filter enable */
471	bool accept_mta_mc; /* Whether to accept multicast via the MTA filter */
472};
473
474struct hclge_vport {
475 u16 alloc_tqps; /* Allocated Tx/Rx queues */
476
477 u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
478 /* User configured lookup table entries */
479 u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
480
481 u16 qs_offset;
482 u16 bw_limit; /* VSI BW Limit (0 = disabled) */
483 u8 dwrr;
484
485 int vport_id;
486 struct hclge_dev *back; /* Back reference to associated dev */
487 struct hnae3_handle nic;
488 struct hnae3_handle roce;
489};
490
491void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
492 bool en_mc, bool en_bc, int vport_id);
493
494int hclge_add_uc_addr_common(struct hclge_vport *vport,
495 const unsigned char *addr);
496int hclge_rm_uc_addr_common(struct hclge_vport *vport,
497 const unsigned char *addr);
498int hclge_add_mc_addr_common(struct hclge_vport *vport,
499 const unsigned char *addr);
500int hclge_rm_mc_addr_common(struct hclge_vport *vport,
501 const unsigned char *addr);
502
503int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
504 u8 func_id,
505 bool enable);
506struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
507int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector,
508 struct hnae3_ring_chain_node *ring_chain);
509static inline int hclge_get_queue_id(struct hnae3_queue *queue)
510{
511 struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
512
513 return tqp->index;
514}
515
516int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
517int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,
518 bool is_kill, u16 vlan, u8 qos, __be16 proto);
519#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
new file mode 100644
index 000000000000..a2add8bb1945
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -0,0 +1,213 @@
1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/etherdevice.h>
11#include <linux/kernel.h>
12
13#include "hclge_cmd.h"
14#include "hclge_main.h"
15#include "hclge_mdio.h"
16
17enum hclge_mdio_c22_op_seq {
18 HCLGE_MDIO_C22_WRITE = 1,
19 HCLGE_MDIO_C22_READ = 2
20};
21
22#define HCLGE_MDIO_CTRL_START_B 0
23#define HCLGE_MDIO_CTRL_ST_S 1
24#define HCLGE_MDIO_CTRL_ST_M (0x3 << HCLGE_MDIO_CTRL_ST_S)
25#define HCLGE_MDIO_CTRL_OP_S 3
26#define HCLGE_MDIO_CTRL_OP_M (0x3 << HCLGE_MDIO_CTRL_OP_S)
27
28#define HCLGE_MDIO_PHYID_S 0
29#define HCLGE_MDIO_PHYID_M (0x1f << HCLGE_MDIO_PHYID_S)
30
31#define HCLGE_MDIO_PHYREG_S 0
32#define HCLGE_MDIO_PHYREG_M (0x1f << HCLGE_MDIO_PHYREG_S)
33
34#define HCLGE_MDIO_STA_B 0
35
36struct hclge_mdio_cfg_cmd {
37 u8 ctrl_bit;
38 u8 phyid;
39 u8 phyad;
40 u8 rsvd;
41 __le16 reserve;
42 __le16 data_wr;
43 __le16 data_rd;
44 __le16 sta;
45};
46
47static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
48 u16 data)
49{
50 struct hclge_mdio_cfg_cmd *mdio_cmd;
51 struct hclge_dev *hdev = bus->priv;
52 struct hclge_desc desc;
53 int ret;
54
55 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
56
57 mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
58
59 hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
60 HCLGE_MDIO_PHYID_S, phyid);
61 hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
62 HCLGE_MDIO_PHYREG_S, regnum);
63
64 hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
65 hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
66 HCLGE_MDIO_CTRL_ST_S, 1);
67 hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
68 HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
69
70 mdio_cmd->data_wr = cpu_to_le16(data);
71
72 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
73 if (ret) {
74 dev_err(&hdev->pdev->dev,
75 "mdio write fail when sending cmd, status is %d.\n",
76 ret);
77 return ret;
78 }
79
80 return 0;
81}
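The ctrl_bit byte built above packs the start bit, the ST field (which the driver sets to 1 for these clause 22 accesses) and the opcode into a single register. A plain-shift sketch of the resulting encodings, assuming only the HCLGE_MDIO_CTRL_* shift values defined earlier in this file (illustrative, not part of the patch):

#include <stdio.h>

#define MDIO_START_B	0	/* HCLGE_MDIO_CTRL_START_B */
#define MDIO_ST_S	1	/* HCLGE_MDIO_CTRL_ST_S */
#define MDIO_OP_S	3	/* HCLGE_MDIO_CTRL_OP_S */

static unsigned char mdio_ctrl_bit(unsigned int op)
{
	return (1 << MDIO_START_B) |	/* start the transaction */
	       (1 << MDIO_ST_S) |	/* ST field = 1 */
	       (op << MDIO_OP_S);	/* 1 = write, 2 = read */
}

int main(void)
{
	printf("C22 write ctrl_bit = 0x%02x\n", mdio_ctrl_bit(1)); /* 0x0b */
	printf("C22 read  ctrl_bit = 0x%02x\n", mdio_ctrl_bit(2)); /* 0x13 */
	return 0;
}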
82
83static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
84{
85 struct hclge_mdio_cfg_cmd *mdio_cmd;
86 struct hclge_dev *hdev = bus->priv;
87 struct hclge_desc desc;
88 int ret;
89
90 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
91
92 mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
93
94 hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
95 HCLGE_MDIO_PHYID_S, phyid);
96 hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
97 HCLGE_MDIO_PHYREG_S, regnum);
98
99 hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
100 hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
101 HCLGE_MDIO_CTRL_ST_S, 1);
102 hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
103 HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
104
105 /* Read out phy data */
106 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
107 if (ret) {
108 dev_err(&hdev->pdev->dev,
109			 "mdio read fail when getting data, status is %d.\n",
110 ret);
111 return ret;
112 }
113
114 if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
115 dev_err(&hdev->pdev->dev, "mdio read data error\n");
116 return -EIO;
117 }
118
119 return le16_to_cpu(mdio_cmd->data_rd);
120}
121
122int hclge_mac_mdio_config(struct hclge_dev *hdev)
123{
124 struct hclge_mac *mac = &hdev->hw.mac;
125 struct phy_device *phydev;
126 struct mii_bus *mdio_bus;
127 int ret;
128
129 if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR)
130 return 0;
131
132 mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev);
133 if (!mdio_bus)
134 return -ENOMEM;
135
136 mdio_bus->name = "hisilicon MII bus";
137 mdio_bus->read = hclge_mdio_read;
138 mdio_bus->write = hclge_mdio_write;
139 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "mii",
140 dev_name(&hdev->pdev->dev));
141
142 mdio_bus->parent = &hdev->pdev->dev;
143 mdio_bus->priv = hdev;
144 mdio_bus->phy_mask = ~(1 << mac->phy_addr);
145 ret = mdiobus_register(mdio_bus);
146 if (ret) {
147 dev_err(mdio_bus->parent,
148 "Failed to register MDIO bus ret = %#x\n", ret);
149 return ret;
150 }
151
152 phydev = mdiobus_get_phy(mdio_bus, mac->phy_addr);
153 if (!phydev || IS_ERR(phydev)) {
154 dev_err(mdio_bus->parent, "Failed to get phy device\n");
155 mdiobus_unregister(mdio_bus);
156 return -EIO;
157 }
158
159 mac->phydev = phydev;
160 mac->mdio_bus = mdio_bus;
161
162 return 0;
163}
164
165static void hclge_mac_adjust_link(struct net_device *netdev)
166{
167 struct hnae3_handle *h = *((void **)netdev_priv(netdev));
168 struct hclge_vport *vport = hclge_get_vport(h);
169 struct hclge_dev *hdev = vport->back;
170 int duplex, speed;
171 int ret;
172
173 speed = netdev->phydev->speed;
174 duplex = netdev->phydev->duplex;
175
176 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
177 if (ret)
178 netdev_err(netdev, "failed to adjust link.\n");
179}
180
181int hclge_mac_start_phy(struct hclge_dev *hdev)
182{
183 struct net_device *netdev = hdev->vport[0].nic.netdev;
184 struct phy_device *phydev = hdev->hw.mac.phydev;
185 int ret;
186
187 if (!phydev)
188 return 0;
189
190 ret = phy_connect_direct(netdev, phydev,
191 hclge_mac_adjust_link,
192 PHY_INTERFACE_MODE_SGMII);
193 if (ret) {
194 netdev_err(netdev, "phy_connect_direct err.\n");
195 return ret;
196 }
197
198 phy_start(phydev);
199
200 return 0;
201}
202
203void hclge_mac_stop_phy(struct hclge_dev *hdev)
204{
205 struct net_device *netdev = hdev->vport[0].nic.netdev;
206 struct phy_device *phydev = netdev->phydev;
207
208 if (!phydev)
209 return;
210
211 phy_stop(phydev);
212 phy_disconnect(phydev);
213}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
new file mode 100644
index 000000000000..c5e91cfb8f2c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -0,0 +1,17 @@
1/*
2 * Copyright (c) 2016-2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef __HCLGE_MDIO_H
11#define __HCLGE_MDIO_H
12
13int hclge_mac_mdio_config(struct hclge_dev *hdev);
14int hclge_mac_start_phy(struct hclge_dev *hdev);
15void hclge_mac_stop_phy(struct hclge_dev *hdev);
16
17#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
new file mode 100644
index 000000000000..1c577d268f00
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -0,0 +1,1015 @@
1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/etherdevice.h>
11
12#include "hclge_cmd.h"
13#include "hclge_main.h"
14#include "hclge_tm.h"
15
16enum hclge_shaper_level {
17 HCLGE_SHAPER_LVL_PRI = 0,
18 HCLGE_SHAPER_LVL_PG = 1,
19 HCLGE_SHAPER_LVL_PORT = 2,
20 HCLGE_SHAPER_LVL_QSET = 3,
21 HCLGE_SHAPER_LVL_CNT = 4,
22 HCLGE_SHAPER_LVL_VF = 0,
23 HCLGE_SHAPER_LVL_PF = 1,
24};
25
26#define HCLGE_SHAPER_BS_U_DEF 1
27#define HCLGE_SHAPER_BS_S_DEF 4
28
29#define HCLGE_ETHER_MAX_RATE 100000
30
31/* hclge_shaper_para_calc: calculate ir parameter for the shaper
32 * @ir: Rate to be configured, in Mbps
33 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
34 * @ir_b: IR_B parameter of IR shaper
35 * @ir_u: IR_U parameter of IR shaper
36 * @ir_s: IR_S parameter of IR shaper
37 *
38 * the formula:
39 *
40 * IR_b * (2 ^ IR_u) * 8
41 * IR(Mbps) = ------------------------- * CLOCK(1000Mbps)
42 * Tick * (2 ^ IR_s)
43 *
44 * @return: 0: calculation successful, negative: failure
45 */
46static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
47 u8 *ir_b, u8 *ir_u, u8 *ir_s)
48{
49 const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
50		6 * 256,	/* Priority level */
51		6 * 32,		/* Priority group level */
52 6 * 8, /* Port level */
53 6 * 256 /* Qset level */
54 };
55 u8 ir_u_calc = 0, ir_s_calc = 0;
56 u32 ir_calc;
57 u32 tick;
58
59 /* Calc tick */
60 if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
61 return -EINVAL;
62
63 tick = tick_array[shaper_level];
64
65 /**
66 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
67 * the formula is changed to:
68 * 126 * 1 * 8
69 * ir_calc = ---------------- * 1000
70 * tick * 1
71 */
72 ir_calc = (1008000 + (tick >> 1) - 1) / tick;
73
74 if (ir_calc == ir) {
75 *ir_b = 126;
76 *ir_u = 0;
77 *ir_s = 0;
78
79 return 0;
80 } else if (ir_calc > ir) {
81 /* Increasing the denominator to select ir_s value */
82 while (ir_calc > ir) {
83 ir_s_calc++;
84 ir_calc = 1008000 / (tick * (1 << ir_s_calc));
85 }
86
87 if (ir_calc == ir)
88 *ir_b = 126;
89 else
90 *ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
91 } else {
92 /* Increasing the numerator to select ir_u value */
93 u32 numerator;
94
95 while (ir_calc < ir) {
96 ir_u_calc++;
97 numerator = 1008000 * (1 << ir_u_calc);
98 ir_calc = (numerator + (tick >> 1)) / tick;
99 }
100
101 if (ir_calc == ir) {
102 *ir_b = 126;
103 } else {
104 u32 denominator = (8000 * (1 << --ir_u_calc));
105 *ir_b = (ir * tick + (denominator >> 1)) / denominator;
106 }
107 }
108
109 *ir_u = ir_u_calc;
110 *ir_s = ir_s_calc;
111
112 return 0;
113}
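As a concrete check of the formula in the comment above: for a 1000 Mbps priority-level shaper (tick = 6 * 256), tracing the routine gives ir_b = 192, ir_u = 0, ir_s = 0, which reproduces the requested rate exactly. A small user-space sketch that plugs those parameters back into the formula (illustrative only; the traced values are derived from the code above, not taken from hardware documentation):

#include <stdio.h>

static unsigned int shaper_rate_mbps(unsigned int ir_b, unsigned int ir_u,
				     unsigned int ir_s, unsigned int tick)
{
	/* IR(Mbps) = ir_b * 2^ir_u * 8 / (tick * 2^ir_s) * 1000 */
	return ir_b * (1u << ir_u) * 8 * 1000 / (tick * (1u << ir_s));
}

int main(void)
{
	unsigned int tick = 6 * 256;	/* priority level */

	/* parameters the calculation above derives for ir = 1000 Mbps */
	printf("1000 Mbps pri shaper -> %u Mbps\n",
	       shaper_rate_mbps(192, 0, 0, tick));
	return 0;
}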
114
115static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
116{
117 struct hclge_desc desc;
118
119 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
120
121 desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
122 (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
123
124 return hclge_cmd_send(&hdev->hw, &desc, 1);
125}
126
127static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
128{
129 u8 tc;
130
131 for (tc = 0; tc < hdev->tm_info.num_tc; tc++)
132 if (hdev->tm_info.tc_info[tc].up == pri_id)
133 break;
134
135 if (tc >= hdev->tm_info.num_tc)
136 return -EINVAL;
137
138 /**
139	 * the register for priority has four bytes, the first byte includes
140	 * priority0 and priority1; the higher 4 bits stand for priority1
141	 * while the lower 4 bits stand for priority0, as below:
142 * first byte: | pri_1 | pri_0 |
143 * second byte: | pri_3 | pri_2 |
144 * third byte: | pri_5 | pri_4 |
145 * fourth byte: | pri_7 | pri_6 |
146 */
147 pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
148
149 return 0;
150}
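A short sketch of the nibble packing described in the comment above, with made-up priority-to-TC assignments (illustrative, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned char pri[4] = { 0 };	/* two priorities per byte */

	/* pretend priority 4 maps to TC 1 and priority 5 maps to TC 2 */
	pri[4 >> 1] |= 1 << ((4 & 1) * 4);	/* low nibble of byte 2 */
	pri[5 >> 1] |= 2 << ((5 & 1) * 4);	/* high nibble of byte 2 */

	printf("pri byte 2 = 0x%02x\n", pri[2]);	/* prints 0x21 */
	return 0;
}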
151
152static int hclge_up_to_tc_map(struct hclge_dev *hdev)
153{
154 struct hclge_desc desc;
155 u8 *pri = (u8 *)desc.data;
156 u8 pri_id;
157 int ret;
158
159 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
160
161 for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) {
162 ret = hclge_fill_pri_array(hdev, pri, pri_id);
163 if (ret)
164 return ret;
165 }
166
167 return hclge_cmd_send(&hdev->hw, &desc, 1);
168}
169
170static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
171 u8 pg_id, u8 pri_bit_map)
172{
173 struct hclge_pg_to_pri_link_cmd *map;
174 struct hclge_desc desc;
175
176 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);
177
178 map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
179
180 map->pg_id = pg_id;
181 map->pri_bit_map = pri_bit_map;
182
183 return hclge_cmd_send(&hdev->hw, &desc, 1);
184}
185
186static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
187 u16 qs_id, u8 pri)
188{
189 struct hclge_qs_to_pri_link_cmd *map;
190 struct hclge_desc desc;
191
192 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);
193
194 map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
195
196 map->qs_id = cpu_to_le16(qs_id);
197 map->priority = pri;
198 map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
199
200 return hclge_cmd_send(&hdev->hw, &desc, 1);
201}
202
203static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
204 u8 q_id, u16 qs_id)
205{
206 struct hclge_nq_to_qs_link_cmd *map;
207 struct hclge_desc desc;
208
209 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
210
211 map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
212
213 map->nq_id = cpu_to_le16(q_id);
214 map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
215
216 return hclge_cmd_send(&hdev->hw, &desc, 1);
217}
218
219static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
220 u8 dwrr)
221{
222 struct hclge_pg_weight_cmd *weight;
223 struct hclge_desc desc;
224
225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);
226
227 weight = (struct hclge_pg_weight_cmd *)desc.data;
228
229 weight->pg_id = pg_id;
230 weight->dwrr = dwrr;
231
232 return hclge_cmd_send(&hdev->hw, &desc, 1);
233}
234
235static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
236 u8 dwrr)
237{
238 struct hclge_priority_weight_cmd *weight;
239 struct hclge_desc desc;
240
241 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);
242
243 weight = (struct hclge_priority_weight_cmd *)desc.data;
244
245 weight->pri_id = pri_id;
246 weight->dwrr = dwrr;
247
248 return hclge_cmd_send(&hdev->hw, &desc, 1);
249}
250
251static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
252 u8 dwrr)
253{
254 struct hclge_qs_weight_cmd *weight;
255 struct hclge_desc desc;
256
257 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);
258
259 weight = (struct hclge_qs_weight_cmd *)desc.data;
260
261 weight->qs_id = cpu_to_le16(qs_id);
262 weight->dwrr = dwrr;
263
264 return hclge_cmd_send(&hdev->hw, &desc, 1);
265}
266
267static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
268 enum hclge_shap_bucket bucket, u8 pg_id,
269 u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
270{
271 struct hclge_pg_shapping_cmd *shap_cfg_cmd;
272 enum hclge_opcode_type opcode;
273 struct hclge_desc desc;
274
275 opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
276 HCLGE_OPC_TM_PG_C_SHAPPING;
277 hclge_cmd_setup_basic_desc(&desc, opcode, false);
278
279 shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
280
281 shap_cfg_cmd->pg_id = pg_id;
282
283 hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
284 hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
285 hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
286 hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
287 hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
288
289 return hclge_cmd_send(&hdev->hw, &desc, 1);
290}
291
292static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
293 enum hclge_shap_bucket bucket, u8 pri_id,
294 u8 ir_b, u8 ir_u, u8 ir_s,
295 u8 bs_b, u8 bs_s)
296{
297 struct hclge_pri_shapping_cmd *shap_cfg_cmd;
298 enum hclge_opcode_type opcode;
299 struct hclge_desc desc;
300
301 opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
302 HCLGE_OPC_TM_PRI_C_SHAPPING;
303
304 hclge_cmd_setup_basic_desc(&desc, opcode, false);
305
306 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
307
308 shap_cfg_cmd->pri_id = pri_id;
309
310 hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
311 hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
312 hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
313 hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
314 hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
315
316 return hclge_cmd_send(&hdev->hw, &desc, 1);
317}
318
319static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
320{
321 struct hclge_desc desc;
322
323 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);
324
325 if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
326 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
327 else
328 desc.data[1] = 0;
329
330 desc.data[0] = cpu_to_le32(pg_id);
331
332 return hclge_cmd_send(&hdev->hw, &desc, 1);
333}
334
335static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
336{
337 struct hclge_desc desc;
338
339 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);
340
341 if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
342 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
343 else
344 desc.data[1] = 0;
345
346 desc.data[0] = cpu_to_le32(pri_id);
347
348 return hclge_cmd_send(&hdev->hw, &desc, 1);
349}
350
351static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id)
352{
353 struct hclge_desc desc;
354
355 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
356
357 if (hdev->tm_info.tc_info[qs_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
358 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
359 else
360 desc.data[1] = 0;
361
362 desc.data[0] = cpu_to_le32(qs_id);
363
364 return hclge_cmd_send(&hdev->hw, &desc, 1);
365}
366
367static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
368{
369 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
370 struct hclge_desc desc;
371
372 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
373 false);
374
375 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
376
377 bp_to_qs_map_cmd->tc_id = tc;
378
379	/* Qset and tc have a one to one mapping */
380 bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);
381
382 return hclge_cmd_send(&hdev->hw, &desc, 1);
383}
384
385static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
386{
387 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
388 struct hclge_dev *hdev = vport->back;
389 u8 i;
390
391 kinfo = &vport->nic.kinfo;
392 vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
393 kinfo->num_tc =
394 min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
395 kinfo->rss_size
396 = min_t(u16, hdev->rss_size_max,
397 kinfo->num_tqps / kinfo->num_tc);
398 vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
399 vport->dwrr = 100; /* 100 percent as init */
400
401 for (i = 0; i < kinfo->num_tc; i++) {
402 if (hdev->hw_tc_map & BIT(i)) {
403 kinfo->tc_info[i].enable = true;
404 kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
405 kinfo->tc_info[i].tqp_count = kinfo->rss_size;
406 kinfo->tc_info[i].tc = i;
407 kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up;
408 } else {
409			/* Set to default queue if TC is disabled */
410 kinfo->tc_info[i].enable = false;
411 kinfo->tc_info[i].tqp_offset = 0;
412 kinfo->tc_info[i].tqp_count = 1;
413 kinfo->tc_info[i].tc = 0;
414 kinfo->tc_info[i].up = 0;
415 }
416 }
417}
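For illustration, with made-up sizes of 16 TQPs, 4 TCs and rss_size_max = 16, the carve-up above gives each enabled TC an rss_size of 4 and consecutive tqp_offset values; a minimal sketch of that arithmetic (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int num_tqps = 16, num_tc = 4, rss_size_max = 16;
	unsigned int rss_size, i;

	rss_size = num_tqps / num_tc;
	if (rss_size > rss_size_max)
		rss_size = rss_size_max;

	for (i = 0; i < num_tc; i++)
		printf("tc%u: tqp_offset %2u, tqp_count %u\n",
		       i, i * rss_size, rss_size);
	return 0;
}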
418
419static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
420{
421 struct hclge_vport *vport = hdev->vport;
422 u32 i;
423
424 for (i = 0; i < hdev->num_alloc_vport; i++) {
425 hclge_tm_vport_tc_info_update(vport);
426
427 vport++;
428 }
429}
430
431static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
432{
433 u8 i;
434
435 for (i = 0; i < hdev->tm_info.num_tc; i++) {
436 hdev->tm_info.tc_info[i].tc_id = i;
437 hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
438 hdev->tm_info.tc_info[i].up = i;
439 hdev->tm_info.tc_info[i].pgid = 0;
440 hdev->tm_info.tc_info[i].bw_limit =
441 hdev->tm_info.pg_info[0].bw_limit;
442 }
443
444 hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
445}
446
447static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
448{
449 u8 i;
450
451 for (i = 0; i < hdev->tm_info.num_pg; i++) {
452 int k;
453
454 hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;
455
456 hdev->tm_info.pg_info[i].pg_id = i;
457 hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
458
459 hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;
460
461 if (i != 0)
462 continue;
463
464 hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
465 for (k = 0; k < hdev->tm_info.num_tc; k++)
466 hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
467 }
468}
469
470static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
471{
472 if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
473 (hdev->tm_info.num_pg != 1))
474 return -EINVAL;
475
476 hclge_tm_pg_info_init(hdev);
477
478 hclge_tm_tc_info_init(hdev);
479
480 hclge_tm_vport_info_update(hdev);
481
482 hdev->tm_info.fc_mode = HCLGE_FC_NONE;
483 hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
484
485 return 0;
486}
487
488static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
489{
490 int ret;
491 u32 i;
492
493 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
494 return 0;
495
496 for (i = 0; i < hdev->tm_info.num_pg; i++) {
497 /* Cfg mapping */
498 ret = hclge_tm_pg_to_pri_map_cfg(
499 hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
500 if (ret)
501 return ret;
502 }
503
504 return 0;
505}
506
507static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
508{
509 u8 ir_u, ir_b, ir_s;
510 int ret;
511 u32 i;
512
513 /* Cfg pg schd */
514 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
515 return 0;
516
517 /* Pg to pri */
518 for (i = 0; i < hdev->tm_info.num_pg; i++) {
519 /* Calc shaper para */
520 ret = hclge_shaper_para_calc(
521 hdev->tm_info.pg_info[i].bw_limit,
522 HCLGE_SHAPER_LVL_PG,
523 &ir_b, &ir_u, &ir_s);
524 if (ret)
525 return ret;
526
527 ret = hclge_tm_pg_shapping_cfg(hdev,
528 HCLGE_TM_SHAP_C_BUCKET, i,
529 0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
530 HCLGE_SHAPER_BS_S_DEF);
531 if (ret)
532 return ret;
533
534 ret = hclge_tm_pg_shapping_cfg(hdev,
535 HCLGE_TM_SHAP_P_BUCKET, i,
536 ir_b, ir_u, ir_s,
537 HCLGE_SHAPER_BS_U_DEF,
538 HCLGE_SHAPER_BS_S_DEF);
539 if (ret)
540 return ret;
541 }
542
543 return 0;
544}
545
546static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
547{
548 int ret;
549 u32 i;
550
551 /* cfg pg schd */
552 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
553 return 0;
554
555 /* pg to prio */
556 for (i = 0; i < hdev->tm_info.num_pg; i++) {
557 /* Cfg dwrr */
558 ret = hclge_tm_pg_weight_cfg(hdev, i,
559 hdev->tm_info.pg_dwrr[i]);
560 if (ret)
561 return ret;
562 }
563
564 return 0;
565}
566
567static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
568 struct hclge_vport *vport)
569{
570 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
571 struct hnae3_queue **tqp = kinfo->tqp;
572 struct hnae3_tc_info *v_tc_info;
573 u32 i, j;
574 int ret;
575
576 for (i = 0; i < kinfo->num_tc; i++) {
577 v_tc_info = &kinfo->tc_info[i];
578 for (j = 0; j < v_tc_info->tqp_count; j++) {
579 struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];
580
581 ret = hclge_tm_q_to_qs_map_cfg(hdev,
582 hclge_get_queue_id(q),
583 vport->qs_offset + i);
584 if (ret)
585 return ret;
586 }
587 }
588
589 return 0;
590}
591
592static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
593{
594 struct hclge_vport *vport = hdev->vport;
595 int ret;
596 u32 i;
597
598 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
599 /* Cfg qs -> pri mapping, one by one mapping */
600 for (i = 0; i < hdev->tm_info.num_tc; i++) {
601 ret = hclge_tm_qs_to_pri_map_cfg(hdev, i, i);
602 if (ret)
603 return ret;
604 }
605 } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
606 int k;
607 /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
608 for (k = 0; k < hdev->num_alloc_vport; k++)
609 for (i = 0; i < HNAE3_MAX_TC; i++) {
610 ret = hclge_tm_qs_to_pri_map_cfg(
611 hdev, vport[k].qs_offset + i, k);
612 if (ret)
613 return ret;
614 }
615 } else {
616 return -EINVAL;
617 }
618
619 /* Cfg q -> qs mapping */
620 for (i = 0; i < hdev->num_alloc_vport; i++) {
621 ret = hclge_vport_q_to_qs_map(hdev, vport);
622 if (ret)
623 return ret;
624
625 vport++;
626 }
627
628 return 0;
629}
630
631static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
632{
633 u8 ir_u, ir_b, ir_s;
634 int ret;
635 u32 i;
636
637 for (i = 0; i < hdev->tm_info.num_tc; i++) {
638 ret = hclge_shaper_para_calc(
639 hdev->tm_info.tc_info[i].bw_limit,
640 HCLGE_SHAPER_LVL_PRI,
641 &ir_b, &ir_u, &ir_s);
642 if (ret)
643 return ret;
644
645 ret = hclge_tm_pri_shapping_cfg(
646 hdev, HCLGE_TM_SHAP_C_BUCKET, i,
647 0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
648 HCLGE_SHAPER_BS_S_DEF);
649 if (ret)
650 return ret;
651
652 ret = hclge_tm_pri_shapping_cfg(
653 hdev, HCLGE_TM_SHAP_P_BUCKET, i,
654 ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
655 HCLGE_SHAPER_BS_S_DEF);
656 if (ret)
657 return ret;
658 }
659
660 return 0;
661}
662
663static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
664{
665 struct hclge_dev *hdev = vport->back;
666 u8 ir_u, ir_b, ir_s;
667 int ret;
668
669 ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
670 &ir_b, &ir_u, &ir_s);
671 if (ret)
672 return ret;
673
674 ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
675 vport->vport_id,
676 0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
677 HCLGE_SHAPER_BS_S_DEF);
678 if (ret)
679 return ret;
680
681 ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
682 vport->vport_id,
683 ir_b, ir_u, ir_s,
684 HCLGE_SHAPER_BS_U_DEF,
685 HCLGE_SHAPER_BS_S_DEF);
686 if (ret)
687 return ret;
688
689 return 0;
690}
691
692static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
693{
694 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
695 struct hclge_dev *hdev = vport->back;
696 struct hnae3_tc_info *v_tc_info;
697 u8 ir_u, ir_b, ir_s;
698 u32 i;
699 int ret;
700
701 for (i = 0; i < kinfo->num_tc; i++) {
702 v_tc_info = &kinfo->tc_info[i];
703 ret = hclge_shaper_para_calc(
704 hdev->tm_info.tc_info[i].bw_limit,
705 HCLGE_SHAPER_LVL_QSET,
706 &ir_b, &ir_u, &ir_s);
707 if (ret)
708 return ret;
709 }
710
711 return 0;
712}
713
714static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
715{
716 struct hclge_vport *vport = hdev->vport;
717 int ret;
718 u32 i;
719
720 /* Configure the shaper for each vport */
721 for (i = 0; i < hdev->num_alloc_vport; i++) {
722 ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
723 if (ret)
724 return ret;
725
726 ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
727 if (ret)
728 return ret;
729
730 vport++;
731 }
732
733 return 0;
734}
735
736static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
737{
738 int ret;
739
740 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
741 ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
742 if (ret)
743 return ret;
744 } else {
745 ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
746 if (ret)
747 return ret;
748 }
749
750 return 0;
751}
752
753static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
754{
755 struct hclge_pg_info *pg_info;
756 u8 dwrr;
757 int ret;
758 u32 i;
759
760 for (i = 0; i < hdev->tm_info.num_tc; i++) {
761 pg_info =
762 &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
763 dwrr = pg_info->tc_dwrr[i];
764
765 ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
766 if (ret)
767 return ret;
768
769 ret = hclge_tm_qs_weight_cfg(hdev, i, dwrr);
770 if (ret)
771 return ret;
772 }
773
774 return 0;
775}
776
777static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
778{
779 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
780 struct hclge_dev *hdev = vport->back;
781 int ret;
782 u8 i;
783
784 /* Vf dwrr */
785 ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
786 if (ret)
787 return ret;
788
789 /* Qset dwrr */
790 for (i = 0; i < kinfo->num_tc; i++) {
791 ret = hclge_tm_qs_weight_cfg(
792 hdev, vport->qs_offset + i,
793 hdev->tm_info.pg_info[0].tc_dwrr[i]);
794 if (ret)
795 return ret;
796 }
797
798 return 0;
799}
800
801static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
802{
803 struct hclge_vport *vport = hdev->vport;
804 int ret;
805 u32 i;
806
807 for (i = 0; i < hdev->num_alloc_vport; i++) {
808 ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
809 if (ret)
810 return ret;
811
812 vport++;
813 }
814
815 return 0;
816}
817
818static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
819{
820 int ret;
821
822 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
823 ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
824 if (ret)
825 return ret;
826 } else {
827 ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
828 if (ret)
829 return ret;
830 }
831
832 return 0;
833}
834
835static int hclge_tm_map_cfg(struct hclge_dev *hdev)
836{
837 int ret;
838
839 ret = hclge_tm_pg_to_pri_map(hdev);
840 if (ret)
841 return ret;
842
843 return hclge_tm_pri_q_qs_cfg(hdev);
844}
845
846static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
847{
848 int ret;
849
850 ret = hclge_tm_pg_shaper_cfg(hdev);
851 if (ret)
852 return ret;
853
854 return hclge_tm_pri_shaper_cfg(hdev);
855}
856
857int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
858{
859 int ret;
860
861 ret = hclge_tm_pg_dwrr_cfg(hdev);
862 if (ret)
863 return ret;
864
865 return hclge_tm_pri_dwrr_cfg(hdev);
866}
867
868static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
869{
870 int ret;
871 u8 i;
872
873 /* Only configured in TC-based scheduler mode */
874 if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
875 return 0;
876
877 for (i = 0; i < hdev->tm_info.num_pg; i++) {
878 ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
879 if (ret)
880 return ret;
881 }
882
883 return 0;
884}
885
886static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
887{
888 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
889 struct hclge_dev *hdev = vport->back;
890 int ret;
891 u8 i;
892
893 ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
894 if (ret)
895 return ret;
896
897 for (i = 0; i < kinfo->num_tc; i++) {
898 ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i);
899 if (ret)
900 return ret;
901 }
902
903 return 0;
904}
905
906static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
907{
908 struct hclge_vport *vport = hdev->vport;
909 int ret;
910 u8 i;
911
912 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
913 for (i = 0; i < hdev->tm_info.num_tc; i++) {
914 ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
915 if (ret)
916 return ret;
917
918 ret = hclge_tm_qs_schd_mode_cfg(hdev, i);
919 if (ret)
920 return ret;
921 }
922 } else {
923 for (i = 0; i < hdev->num_alloc_vport; i++) {
924 ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
925 if (ret)
926 return ret;
927
928 vport++;
929 }
930 }
931
932 return 0;
933}
934
935static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
936{
937 int ret;
938
939 ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
940 if (ret)
941 return ret;
942
943 return hclge_tm_lvl34_schd_mode_cfg(hdev);
944}
945
946static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
947{
948 int ret;
949
950 /* Cfg tm mapping */
951 ret = hclge_tm_map_cfg(hdev);
952 if (ret)
953 return ret;
954
955 /* Cfg tm shaper */
956 ret = hclge_tm_shaper_cfg(hdev);
957 if (ret)
958 return ret;
959
960 /* Cfg dwrr */
961 ret = hclge_tm_dwrr_cfg(hdev);
962 if (ret)
963 return ret;
964
965 /* Cfg schd mode for each level schd */
966 return hclge_tm_schd_mode_hw(hdev);
967}
968
969int hclge_pause_setup_hw(struct hclge_dev *hdev)
970{
971 bool en = hdev->tm_info.fc_mode != HCLGE_FC_PFC;
972 int ret;
973 u8 i;
974
975 ret = hclge_mac_pause_en_cfg(hdev, en, en);
976 if (ret)
977 return ret;
978
979 for (i = 0; i < hdev->tm_info.num_tc; i++) {
980 ret = hclge_tm_qs_bp_cfg(hdev, i);
981 if (ret)
982 return ret;
983 }
984
985 return hclge_up_to_tc_map(hdev);
986}
987
988int hclge_tm_init_hw(struct hclge_dev *hdev)
989{
990 int ret;
991
992 if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
993 (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
994 return -ENOTSUPP;
995
996 ret = hclge_tm_schd_setup_hw(hdev);
997 if (ret)
998 return ret;
999
1000 ret = hclge_pause_setup_hw(hdev);
1001 if (ret)
1002 return ret;
1003
1004 return 0;
1005}
1006
1007int hclge_tm_schd_init(struct hclge_dev *hdev)
1008{
1009 int ret = hclge_tm_schd_info_init(hdev);
1010
1011 if (ret)
1012 return ret;
1013
1014 return hclge_tm_init_hw(hdev);
1015}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
new file mode 100644
index 000000000000..7e67337dfaf2
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -0,0 +1,106 @@
1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef __HCLGE_TM_H
11#define __HCLGE_TM_H
12
13#include <linux/types.h>
14
15/* MAC Pause */
16#define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0)
17#define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1)
18
19#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0)
20
21/* SP or DWRR */
22#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
23#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
24
25struct hclge_pg_to_pri_link_cmd {
26 u8 pg_id;
27 u8 rsvd1[3];
28 u8 pri_bit_map;
29};
30
31struct hclge_qs_to_pri_link_cmd {
32 __le16 qs_id;
33 __le16 rsvd;
34 u8 priority;
35#define HCLGE_TM_QS_PRI_LINK_VLD_MSK BIT(0)
36 u8 link_vld;
37};
38
39struct hclge_nq_to_qs_link_cmd {
40 __le16 nq_id;
41 __le16 rsvd;
42#define HCLGE_TM_Q_QS_LINK_VLD_MSK BIT(10)
43 __le16 qset_id;
44};
45
46struct hclge_pg_weight_cmd {
47 u8 pg_id;
48 u8 dwrr;
49};
50
51struct hclge_priority_weight_cmd {
52 u8 pri_id;
53 u8 dwrr;
54};
55
56struct hclge_qs_weight_cmd {
57 __le16 qs_id;
58 u8 dwrr;
59};
60
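/* Layout of the 32-bit shaping parameter used by the shaping
 * commands below: IR_B in bits [7:0], IR_U in [11:8], IR_S in
 * [15:12], BS_B in [20:16] and BS_S in [25:21].
 */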
61#define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0)
62#define HCLGE_TM_SHAP_IR_B_LSH 0
63#define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8)
64#define HCLGE_TM_SHAP_IR_U_LSH 8
65#define HCLGE_TM_SHAP_IR_S_MSK GENMASK(15, 12)
66#define HCLGE_TM_SHAP_IR_S_LSH 12
67#define HCLGE_TM_SHAP_BS_B_MSK GENMASK(20, 16)
68#define HCLGE_TM_SHAP_BS_B_LSH 16
69#define HCLGE_TM_SHAP_BS_S_MSK GENMASK(25, 21)
70#define HCLGE_TM_SHAP_BS_S_LSH 21
71
72enum hclge_shap_bucket {
73 HCLGE_TM_SHAP_C_BUCKET = 0,
74 HCLGE_TM_SHAP_P_BUCKET,
75};
76
77struct hclge_pri_shapping_cmd {
78 u8 pri_id;
79 u8 rsvd[3];
80 __le32 pri_shapping_para;
81};
82
83struct hclge_pg_shapping_cmd {
84 u8 pg_id;
85 u8 rsvd[3];
86 __le32 pg_shapping_para;
87};
88
89struct hclge_bp_to_qs_map_cmd {
90 u8 tc_id;
91 u8 rsvd[2];
92 u8 qs_group_id;
93 __le32 qs_bit_map;
94 u32 rsvd1;
95};
96
97#define hclge_tm_set_feild(dest, string, val) \
98 hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
99 (HCLGE_TM_SHAP_##string##_LSH), val)
100#define hclge_tm_get_feild(src, string) \
101 hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
102 (HCLGE_TM_SHAP_##string##_LSH))
103
104int hclge_tm_schd_init(struct hclge_dev *hdev);
105int hclge_pause_setup_hw(struct hclge_dev *hdev);
106#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
new file mode 100644
index 000000000000..ad9481c7ceae
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
@@ -0,0 +1,2848 @@
1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/dma-mapping.h>
11#include <linux/etherdevice.h>
12#include <linux/interrupt.h>
13#include <linux/if_vlan.h>
14#include <linux/ip.h>
15#include <linux/ipv6.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/skbuff.h>
19#include <linux/sctp.h>
20#include <linux/vermagic.h>
21#include <net/gre.h>
22#include <net/vxlan.h>
23
24#include "hnae3.h"
25#include "hns3_enet.h"
26
27const char hns3_driver_name[] = "hns3";
28const char hns3_driver_version[] = VERMAGIC_STRING;
29static const char hns3_driver_string[] =
30 "Hisilicon Ethernet Network Driver for Hip08 Family";
31static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
32static struct hnae3_client client;
33
34/* hns3_pci_tbl - PCI Device ID Table
35 *
36 * Last entry must be all 0s
37 *
38 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
39 * Class, Class Mask, private data (not used) }
40 */
41static const struct pci_device_id hns3_pci_tbl[] = {
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49 /* required last entry */
50 {0, }
51};
52MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
53
54static irqreturn_t hns3_irq_handle(int irq, void *dev)
55{
56 struct hns3_enet_tqp_vector *tqp_vector = dev;
57
58 napi_schedule(&tqp_vector->napi);
59
60 return IRQ_HANDLED;
61}
62
63static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
64{
65 struct hns3_enet_tqp_vector *tqp_vectors;
66 unsigned int i;
67
68 for (i = 0; i < priv->vector_num; i++) {
69 tqp_vectors = &priv->tqp_vector[i];
70
71 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
72 continue;
73
74 /* release the irq resource */
75 free_irq(tqp_vectors->vector_irq, tqp_vectors);
76 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
77 }
78}
79
80static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
81{
82 struct hns3_enet_tqp_vector *tqp_vectors;
83 int txrx_int_idx = 0;
84 int rx_int_idx = 0;
85 int tx_int_idx = 0;
86 unsigned int i;
87 int ret;
88
89 for (i = 0; i < priv->vector_num; i++) {
90 tqp_vectors = &priv->tqp_vector[i];
91
92 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
93 continue;
94
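/* Name each vector after the rings it serves (TxRx, Rx or Tx)
 * so it can be identified in /proc/interrupts.
 */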
95 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
96 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
97 "%s-%s-%d", priv->netdev->name, "TxRx",
98 txrx_int_idx++);
99 txrx_int_idx++;
100 } else if (tqp_vectors->rx_group.ring) {
101 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
102 "%s-%s-%d", priv->netdev->name, "Rx",
103 rx_int_idx++);
104 } else if (tqp_vectors->tx_group.ring) {
105 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
106 "%s-%s-%d", priv->netdev->name, "Tx",
107 tx_int_idx++);
108 } else {
109 /* Skip this unused q_vector */
110 continue;
111 }
112
113 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
114
115 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
116 tqp_vectors->name,
117 tqp_vectors);
118 if (ret) {
119 netdev_err(priv->netdev, "request irq(%d) fail\n",
120 tqp_vectors->vector_irq);
121 return ret;
122 }
123
124 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
125 }
126
127 return 0;
128}
129
130static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
131 u32 mask_en)
132{
133 writel(mask_en, tqp_vector->mask_addr);
134}
135
136static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
137{
138 napi_enable(&tqp_vector->napi);
139
140 /* enable vector */
141 hns3_mask_vector_irq(tqp_vector, 1);
142}
143
144static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
145{
146 /* disable vector */
147 hns3_mask_vector_irq(tqp_vector, 0);
148
149 disable_irq(tqp_vector->vector_irq);
150 napi_disable(&tqp_vector->napi);
151}
152
153static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
154 u32 gl_value)
155{
156 /* this defines the configuration for GL (Interrupt Gap Limiter).
157 * GL defines the gap between interrupts.
158 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
159 */
160 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
161 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
162 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
163}
164
165static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
166 u32 rl_value)
167{
168 /* this defines the configuration for RL (Interrupt Rate Limiter).
169 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
170 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
171 */
172 writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
173}
174
175static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
176{
177 /* initialize the configuration for interrupt coalescing.
178 * 1. GL (Interrupt Gap Limiter)
179 * 2. RL (Interrupt Rate Limiter)
180 */
181
182 /* Default: enable interrupt coalescing */
183 tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
184 tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
185 hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
186 /* for now interrupt RL is disabled - it
187 * will be re-enabled later
188 */
189 hns3_set_vector_coalesc_rl(tqp_vector, 0);
190 tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
191 tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
192}
193
194static int hns3_nic_net_up(struct net_device *netdev)
195{
196 struct hns3_nic_priv *priv = netdev_priv(netdev);
197 struct hnae3_handle *h = priv->ae_handle;
198 int i, j;
199 int ret;
200
201 /* get irq resource for all vectors */
202 ret = hns3_nic_init_irq(priv);
203 if (ret) {
204 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
205 return ret;
206 }
207
208 /* enable the vectors */
209 for (i = 0; i < priv->vector_num; i++)
210 hns3_vector_enable(&priv->tqp_vector[i]);
211
212 /* start the ae_dev */
213 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
214 if (ret)
215 goto out_start_err;
216
217 return 0;
218
219out_start_err:
220 for (j = i - 1; j >= 0; j--)
221 hns3_vector_disable(&priv->tqp_vector[j]);
222
223 hns3_nic_uninit_irq(priv);
224
225 return ret;
226}
227
228static int hns3_nic_net_open(struct net_device *netdev)
229{
230 struct hns3_nic_priv *priv = netdev_priv(netdev);
231 struct hnae3_handle *h = priv->ae_handle;
232 int ret;
233
234 netif_carrier_off(netdev);
235
236 ret = netif_set_real_num_tx_queues(netdev, h->kinfo.num_tqps);
237 if (ret) {
238 netdev_err(netdev,
239 "netif_set_real_num_tx_queues fail, ret=%d!\n",
240 ret);
241 return ret;
242 }
243
244 ret = netif_set_real_num_rx_queues(netdev, h->kinfo.num_tqps);
245 if (ret) {
246 netdev_err(netdev,
247 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
248 return ret;
249 }
250
251 ret = hns3_nic_net_up(netdev);
252 if (ret) {
253 netdev_err(netdev,
254 "hns net up fail, ret=%d!\n", ret);
255 return ret;
256 }
257
258 return 0;
259}
260
261static void hns3_nic_net_down(struct net_device *netdev)
262{
263 struct hns3_nic_priv *priv = netdev_priv(netdev);
264 const struct hnae3_ae_ops *ops;
265 int i;
266
267 /* stop ae_dev */
268 ops = priv->ae_handle->ae_algo->ops;
269 if (ops->stop)
270 ops->stop(priv->ae_handle);
271
272 /* disable vectors */
273 for (i = 0; i < priv->vector_num; i++)
274 hns3_vector_disable(&priv->tqp_vector[i]);
275
276 /* free irq resources */
277 hns3_nic_uninit_irq(priv);
278}
279
280static int hns3_nic_net_stop(struct net_device *netdev)
281{
282 netif_tx_stop_all_queues(netdev);
283 netif_carrier_off(netdev);
284
285 hns3_nic_net_down(netdev);
286
287 return 0;
288}
289
290void hns3_set_multicast_list(struct net_device *netdev)
291{
292 struct hns3_nic_priv *priv = netdev_priv(netdev);
293 struct hnae3_handle *h = priv->ae_handle;
294 struct netdev_hw_addr *ha = NULL;
295
296 if (h->ae_algo->ops->set_mc_addr) {
297 netdev_for_each_mc_addr(ha, netdev)
298 if (h->ae_algo->ops->set_mc_addr(h, ha->addr))
299 netdev_err(netdev, "set multicast fail\n");
300 }
301}
302
303static int hns3_nic_uc_sync(struct net_device *netdev,
304 const unsigned char *addr)
305{
306 struct hns3_nic_priv *priv = netdev_priv(netdev);
307 struct hnae3_handle *h = priv->ae_handle;
308
309 if (h->ae_algo->ops->add_uc_addr)
310 return h->ae_algo->ops->add_uc_addr(h, addr);
311
312 return 0;
313}
314
315static int hns3_nic_uc_unsync(struct net_device *netdev,
316 const unsigned char *addr)
317{
318 struct hns3_nic_priv *priv = netdev_priv(netdev);
319 struct hnae3_handle *h = priv->ae_handle;
320
321 if (h->ae_algo->ops->rm_uc_addr)
322 return h->ae_algo->ops->rm_uc_addr(h, addr);
323
324 return 0;
325}
326
327static int hns3_nic_mc_sync(struct net_device *netdev,
328 const unsigned char *addr)
329{
330 struct hns3_nic_priv *priv = netdev_priv(netdev);
331 struct hnae3_handle *h = priv->ae_handle;
332
333 if (h->ae_algo->ops->add_mc_addr)
334 return h->ae_algo->ops->add_mc_addr(h, addr);
335
336 return 0;
337}
338
339static int hns3_nic_mc_unsync(struct net_device *netdev,
340 const unsigned char *addr)
341{
342 struct hns3_nic_priv *priv = netdev_priv(netdev);
343 struct hnae3_handle *h = priv->ae_handle;
344
345 if (h->ae_algo->ops->rm_mc_addr)
346 return h->ae_algo->ops->rm_mc_addr(h, addr);
347
348 return 0;
349}
350
351void hns3_nic_set_rx_mode(struct net_device *netdev)
352{
353 struct hns3_nic_priv *priv = netdev_priv(netdev);
354 struct hnae3_handle *h = priv->ae_handle;
355
356 if (h->ae_algo->ops->set_promisc_mode) {
357 if (netdev->flags & IFF_PROMISC)
358 h->ae_algo->ops->set_promisc_mode(h, 1);
359 else
360 h->ae_algo->ops->set_promisc_mode(h, 0);
361 }
362 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
363 netdev_err(netdev, "sync uc address fail\n");
364 if (netdev->flags & IFF_MULTICAST)
365 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
366 netdev_err(netdev, "sync mc address fail\n");
367}
368
369static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
370 u16 *mss, u32 *type_cs_vlan_tso)
371{
372 u32 l4_offset, hdr_len;
373 union l3_hdr_info l3;
374 union l4_hdr_info l4;
375 u32 l4_paylen;
376 int ret;
377
378 if (!skb_is_gso(skb))
379 return 0;
380
381 ret = skb_cow_head(skb, 0);
382 if (ret)
383 return ret;
384
385 l3.hdr = skb_network_header(skb);
386 l4.hdr = skb_transport_header(skb);
387
388 /* Software should clear the IPv4's checksum field when tso is
389 * needed.
390 */
391 if (l3.v4->version == 4)
392 l3.v4->check = 0;
393
394 /* tunnel packet.*/
395 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
396 SKB_GSO_GRE_CSUM |
397 SKB_GSO_UDP_TUNNEL |
398 SKB_GSO_UDP_TUNNEL_CSUM)) {
399 if ((!(skb_shinfo(skb)->gso_type &
400 SKB_GSO_PARTIAL)) &&
401 (skb_shinfo(skb)->gso_type &
402 SKB_GSO_UDP_TUNNEL_CSUM)) {
403 /* Software should clear the udp's checksum
404 * field when tso is needed.
405 */
406 l4.udp->check = 0;
407 }
408 /* reset l3&l4 pointers from outer to inner headers */
409 l3.hdr = skb_inner_network_header(skb);
410 l4.hdr = skb_inner_transport_header(skb);
411
412 /* Software should clear the IPv4's checksum field when
413 * tso is needed.
414 */
415 if (l3.v4->version == 4)
416 l3.v4->check = 0;
417 }
418
419 /* normal or tunnel packet*/
420 l4_offset = l4.hdr - skb->data;
421 hdr_len = (l4.tcp->doff * 4) + l4_offset;
422
423 /* remove the payload length from the inner pseudo checksum for TSO */
424 l4_paylen = skb->len - l4_offset;
425 csum_replace_by_diff(&l4.tcp->check,
426 (__force __wsum)htonl(l4_paylen));
427
428 /* find the txbd field values */
429 *paylen = skb->len - hdr_len;
430 hnae_set_bit(*type_cs_vlan_tso,
431 HNS3_TXD_TSO_B, 1);
432
433 /* get MSS for TSO */
434 *mss = skb_shinfo(skb)->gso_size;
435
436 return 0;
437}
438
439static void hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
440 u8 *il4_proto)
441{
442 union {
443 struct iphdr *v4;
444 struct ipv6hdr *v6;
445 unsigned char *hdr;
446 } l3;
447 unsigned char *l4_hdr;
448 unsigned char *exthdr;
449 u8 l4_proto_tmp;
450 __be16 frag_off;
451
452 /* find outer header pointers */
453 l3.hdr = skb_network_header(skb);
454 l4_hdr = skb_inner_transport_header(skb);
455
456 if (skb->protocol == htons(ETH_P_IPV6)) {
457 exthdr = l3.hdr + sizeof(*l3.v6);
458 l4_proto_tmp = l3.v6->nexthdr;
459 if (l4_hdr != exthdr)
460 ipv6_skip_exthdr(skb, exthdr - skb->data,
461 &l4_proto_tmp, &frag_off);
462 } else if (skb->protocol == htons(ETH_P_IP)) {
463 l4_proto_tmp = l3.v4->protocol;
464 }
465
466 *ol4_proto = l4_proto_tmp;
467
468 /* tunnel packet */
469 if (!skb->encapsulation) {
470 *il4_proto = 0;
471 return;
472 }
473
474 /* find inner header pointers */
475 l3.hdr = skb_inner_network_header(skb);
476 l4_hdr = skb_inner_transport_header(skb);
477
478 if (l3.v6->version == 6) {
479 exthdr = l3.hdr + sizeof(*l3.v6);
480 l4_proto_tmp = l3.v6->nexthdr;
481 if (l4_hdr != exthdr)
482 ipv6_skip_exthdr(skb, exthdr - skb->data,
483 &l4_proto_tmp, &frag_off);
484 } else if (l3.v4->version == 4) {
485 l4_proto_tmp = l3.v4->protocol;
486 }
487
488 *il4_proto = l4_proto_tmp;
489}
490
491static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
492 u8 il4_proto, u32 *type_cs_vlan_tso,
493 u32 *ol_type_vlan_len_msec)
494{
495 union {
496 struct iphdr *v4;
497 struct ipv6hdr *v6;
498 unsigned char *hdr;
499 } l3;
500 union {
501 struct tcphdr *tcp;
502 struct udphdr *udp;
503 struct gre_base_hdr *gre;
504 unsigned char *hdr;
505 } l4;
506 unsigned char *l2_hdr;
507 u8 l4_proto = ol4_proto;
508 u32 ol2_len;
509 u32 ol3_len;
510 u32 ol4_len;
511 u32 l2_len;
512 u32 l3_len;
513
514 l3.hdr = skb_network_header(skb);
515 l4.hdr = skb_transport_header(skb);
516
517 /* compute L2 header size for normal packet, defined in 2 Bytes */
518 l2_len = l3.hdr - skb->data;
519 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
520 HNS3_TXD_L2LEN_S, l2_len >> 1);
521
522 /* tunnel packet*/
523 if (skb->encapsulation) {
524 /* compute OL2 header size, defined in 2 Bytes */
525 ol2_len = l2_len;
526 hnae_set_field(*ol_type_vlan_len_msec,
527 HNS3_TXD_L2LEN_M,
528 HNS3_TXD_L2LEN_S, ol2_len >> 1);
529
530 /* compute OL3 header size, defined in 4 Bytes */
531 ol3_len = l4.hdr - l3.hdr;
532 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
533 HNS3_TXD_L3LEN_S, ol3_len >> 2);
534
535 /* MAC in UDP, MAC in GRE (0x6558)*/
536 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
537 /* switch MAC header ptr from outer to inner header.*/
538 l2_hdr = skb_inner_mac_header(skb);
539
540 /* compute OL4 header size, defined in 4 Bytes. */
541 ol4_len = l2_hdr - l4.hdr;
542 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
543 HNS3_TXD_L4LEN_S, ol4_len >> 2);
544
545 /* switch IP header ptr from outer to inner header */
546 l3.hdr = skb_inner_network_header(skb);
547
548 /* compute inner l2 header size, defined in 2 Bytes. */
549 l2_len = l3.hdr - l2_hdr;
550 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
551 HNS3_TXD_L2LEN_S, l2_len >> 1);
552 } else {
553 /* skb packet types not supported by hardware,
554 * so the txbd len field is not filled.
555 */
556 return;
557 }
558
559 /* switch L4 header pointer from outer to inner */
560 l4.hdr = skb_inner_transport_header(skb);
561
562 l4_proto = il4_proto;
563 }
564
565 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
566 l3_len = l4.hdr - l3.hdr;
567 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
568 HNS3_TXD_L3LEN_S, l3_len >> 2);
569
570 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
571 switch (l4_proto) {
572 case IPPROTO_TCP:
573 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
574 HNS3_TXD_L4LEN_S, l4.tcp->doff);
575 break;
576 case IPPROTO_SCTP:
577 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
578 HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
579 break;
580 case IPPROTO_UDP:
581 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
582 HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
583 break;
584 default:
585 /* skb packet types not supported by hardware,
586 * so the txbd len field is not filled.
587 */
588 return;
589 }
590}
591
592static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
593 u8 il4_proto, u32 *type_cs_vlan_tso,
594 u32 *ol_type_vlan_len_msec)
595{
596 union {
597 struct iphdr *v4;
598 struct ipv6hdr *v6;
599 unsigned char *hdr;
600 } l3;
601 u32 l4_proto = ol4_proto;
602
603 l3.hdr = skb_network_header(skb);
604
605 /* define OL3 type and tunnel type(OL4).*/
606 if (skb->encapsulation) {
607 /* define outer network header type.*/
608 if (skb->protocol == htons(ETH_P_IP)) {
609 if (skb_is_gso(skb))
610 hnae_set_field(*ol_type_vlan_len_msec,
611 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
612 HNS3_OL3T_IPV4_CSUM);
613 else
614 hnae_set_field(*ol_type_vlan_len_msec,
615 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
616 HNS3_OL3T_IPV4_NO_CSUM);
617
618 } else if (skb->protocol == htons(ETH_P_IPV6)) {
619 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
620 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
621 }
622
623 /* define tunnel type(OL4).*/
624 switch (l4_proto) {
625 case IPPROTO_UDP:
626 hnae_set_field(*ol_type_vlan_len_msec,
627 HNS3_TXD_TUNTYPE_M,
628 HNS3_TXD_TUNTYPE_S,
629 HNS3_TUN_MAC_IN_UDP);
630 break;
631 case IPPROTO_GRE:
632 hnae_set_field(*ol_type_vlan_len_msec,
633 HNS3_TXD_TUNTYPE_M,
634 HNS3_TXD_TUNTYPE_S,
635 HNS3_TUN_NVGRE);
636 break;
637 default:
638 /* drop the tunnel packet if the hardware does not support it,
639 * because the hardware cannot calculate the csum when doing TSO.
640 */
641 if (skb_is_gso(skb))
642 return -EDOM;
643
644 /* the stack has already computed the IP header checksum,
645 * the driver calculates the L4 checksum in software when not doing TSO.
646 */
647 skb_checksum_help(skb);
648 return 0;
649 }
650
651 l3.hdr = skb_inner_network_header(skb);
652 l4_proto = il4_proto;
653 }
654
655 if (l3.v4->version == 4) {
656 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
657 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
658
659 /* the stack computes the IP header already, the only time we
660 * need the hardware to recompute it is in the case of TSO.
661 */
662 if (skb_is_gso(skb))
663 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
664
665 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
666 } else if (l3.v6->version == 6) {
667 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
668 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
669 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
670 }
671
672 switch (l4_proto) {
673 case IPPROTO_TCP:
674 hnae_set_field(*type_cs_vlan_tso,
675 HNS3_TXD_L4T_M,
676 HNS3_TXD_L4T_S,
677 HNS3_L4T_TCP);
678 break;
679 case IPPROTO_UDP:
680 hnae_set_field(*type_cs_vlan_tso,
681 HNS3_TXD_L4T_M,
682 HNS3_TXD_L4T_S,
683 HNS3_L4T_UDP);
684 break;
685 case IPPROTO_SCTP:
686 hnae_set_field(*type_cs_vlan_tso,
687 HNS3_TXD_L4T_M,
688 HNS3_TXD_L4T_S,
689 HNS3_L4T_SCTP);
690 break;
691 default:
692 /* drop the tunnel packet if the hardware does not support it,
693 * because the hardware cannot calculate the csum when doing TSO.
694 */
695 if (skb_is_gso(skb))
696 return -EDOM;
697
698 /* the stack has already computed the IP header checksum,
699 * the driver calculates the L4 checksum in software when not doing TSO.
700 */
701 skb_checksum_help(skb);
702 return 0;
703 }
704
705 return 0;
706}
707
708static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
709{
710 /* Config bd buffer end */
711 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
712 HNS3_TXD_BDTYPE_S, 0);
713 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
714 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
715 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1);
716}
717
718static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
719 int size, dma_addr_t dma, int frag_end,
720 enum hns_desc_type type)
721{
722 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
723 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
724 u32 ol_type_vlan_len_msec = 0;
725 u16 bdtp_fe_sc_vld_ra_ri = 0;
726 u32 type_cs_vlan_tso = 0;
727 struct sk_buff *skb;
728 u32 paylen = 0;
729 u16 mss = 0;
730 __be16 protocol;
731 u8 ol4_proto;
732 u8 il4_proto;
733 int ret;
734
735 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
736 desc_cb->priv = priv;
737 desc_cb->length = size;
738 desc_cb->dma = dma;
739 desc_cb->type = type;
740
741 /* now, fill the descriptor */
742 desc->addr = cpu_to_le64(dma);
743 desc->tx.send_size = cpu_to_le16((u16)size);
744 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
745 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
746
747 if (type == DESC_TYPE_SKB) {
748 skb = (struct sk_buff *)priv;
749 paylen = skb->len;
750
751 if (skb->ip_summed == CHECKSUM_PARTIAL) {
752 skb_reset_mac_len(skb);
753 protocol = skb->protocol;
754
755 /* vlan packet*/
756 if (protocol == htons(ETH_P_8021Q)) {
757 protocol = vlan_get_protocol(skb);
758 skb->protocol = protocol;
759 }
760 hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
761 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
762 &type_cs_vlan_tso,
763 &ol_type_vlan_len_msec);
764 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
765 &type_cs_vlan_tso,
766 &ol_type_vlan_len_msec);
767 if (ret)
768 return ret;
769
770 ret = hns3_set_tso(skb, &paylen, &mss,
771 &type_cs_vlan_tso);
772 if (ret)
773 return ret;
774 }
775
776 /* Set txbd */
777 desc->tx.ol_type_vlan_len_msec =
778 cpu_to_le32(ol_type_vlan_len_msec);
779 desc->tx.type_cs_vlan_tso_len =
780 cpu_to_le32(type_cs_vlan_tso);
781 desc->tx.paylen = cpu_to_le16(paylen);
782 desc->tx.mss = cpu_to_le16(mss);
783 }
784
785 /* move ring pointer to next.*/
786 ring_ptr_move_fw(ring, next_to_use);
787
788 return 0;
789}
790
791static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
792 int size, dma_addr_t dma, int frag_end,
793 enum hns_desc_type type)
794{
795 unsigned int frag_buf_num;
796 unsigned int k;
797 int sizeoflast;
798 int ret;
799
800 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
801 sizeoflast = size % HNS3_MAX_BD_SIZE;
802 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
803
804 /* When the frag size is bigger than hardware, split this frag */
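/* Only the first BD of an SKB keeps DESC_TYPE_SKB; the remaining
 * BDs are marked DESC_TYPE_PAGE so the skb is freed only once on
 * cleanup.
 */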
805 for (k = 0; k < frag_buf_num; k++) {
806 ret = hns3_fill_desc(ring, priv,
807 (k == frag_buf_num - 1) ?
808 sizeoflast : HNS3_MAX_BD_SIZE,
809 dma + HNS3_MAX_BD_SIZE * k,
810 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
811 (type == DESC_TYPE_SKB && !k) ?
812 DESC_TYPE_SKB : DESC_TYPE_PAGE);
813 if (ret)
814 return ret;
815 }
816
817 return 0;
818}
819
820static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
821 struct hns3_enet_ring *ring)
822{
823 struct sk_buff *skb = *out_skb;
824 struct skb_frag_struct *frag;
825 int bdnum_for_frag;
826 int frag_num;
827 int buf_num;
828 int size;
829 int i;
830
831 size = skb_headlen(skb);
832 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
833
834 frag_num = skb_shinfo(skb)->nr_frags;
835 for (i = 0; i < frag_num; i++) {
836 frag = &skb_shinfo(skb)->frags[i];
837 size = skb_frag_size(frag);
838 bdnum_for_frag =
839 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
840 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
841 return -ENOMEM;
842
843 buf_num += bdnum_for_frag;
844 }
845
846 if (buf_num > ring_space(ring))
847 return -EBUSY;
848
849 *bnum = buf_num;
850 return 0;
851}
852
853static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
854 struct hns3_enet_ring *ring)
855{
856 struct sk_buff *skb = *out_skb;
857 int buf_num;
858
859 /* No. of segments (plus a header) */
860 buf_num = skb_shinfo(skb)->nr_frags + 1;
861
862 if (buf_num > ring_space(ring))
863 return -EBUSY;
864
865 *bnum = buf_num;
866
867 return 0;
868}
869
870static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
871{
872 struct device *dev = ring_to_dev(ring);
873 unsigned int i;
874
875 for (i = 0; i < ring->desc_num; i++) {
876 /* check if this is where we started */
877 if (ring->next_to_use == next_to_use_orig)
878 break;
879
880 /* unmap the descriptor dma address */
881 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
882 dma_unmap_single(dev,
883 ring->desc_cb[ring->next_to_use].dma,
884 ring->desc_cb[ring->next_to_use].length,
885 DMA_TO_DEVICE);
886 else
887 dma_unmap_page(dev,
888 ring->desc_cb[ring->next_to_use].dma,
889 ring->desc_cb[ring->next_to_use].length,
890 DMA_TO_DEVICE);
891
892 /* rollback one */
893 ring_ptr_move_bw(ring, next_to_use);
894 }
895}
896
897static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb,
898 struct net_device *netdev)
899{
900 struct hns3_nic_priv *priv = netdev_priv(netdev);
901 struct hns3_nic_ring_data *ring_data =
902 &tx_ring_data(priv, skb->queue_mapping);
903 struct hns3_enet_ring *ring = ring_data->ring;
904 struct device *dev = priv->dev;
905 struct netdev_queue *dev_queue;
906 struct skb_frag_struct *frag;
907 int next_to_use_head;
908 int next_to_use_frag;
909 dma_addr_t dma;
910 int buf_num;
911 int seg_num;
912 int size;
913 int ret;
914 int i;
915
916 /* Prefetch the data used later */
917 prefetch(skb->data);
918
919 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
920 case -EBUSY:
921 u64_stats_update_begin(&ring->syncp);
922 ring->stats.tx_busy++;
923 u64_stats_update_end(&ring->syncp);
924
925 goto out_net_tx_busy;
926 case -ENOMEM:
927 u64_stats_update_begin(&ring->syncp);
928 ring->stats.sw_err_cnt++;
929 u64_stats_update_end(&ring->syncp);
930 netdev_err(netdev, "no memory to xmit!\n");
931
932 goto out_err_tx_ok;
933 default:
934 break;
935 }
936
937 /* No. of segments (plus a header) */
938 seg_num = skb_shinfo(skb)->nr_frags + 1;
939 /* Fill the first part */
940 size = skb_headlen(skb);
941
942 next_to_use_head = ring->next_to_use;
943
944 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
945 if (dma_mapping_error(dev, dma)) {
946 netdev_err(netdev, "TX head DMA map failed\n");
947 ring->stats.sw_err_cnt++;
948 goto out_err_tx_ok;
949 }
950
951 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
952 DESC_TYPE_SKB);
953 if (ret)
954 goto head_dma_map_err;
955
956 next_to_use_frag = ring->next_to_use;
957 /* Fill the fragments */
958 for (i = 1; i < seg_num; i++) {
959 frag = &skb_shinfo(skb)->frags[i - 1];
960 size = skb_frag_size(frag);
961 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
962 if (dma_mapping_error(dev, dma)) {
963 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
964 ring->stats.sw_err_cnt++;
965 goto frag_dma_map_err;
966 }
967 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
968 seg_num - 1 == i ? 1 : 0,
969 DESC_TYPE_PAGE);
970
971 if (ret)
972 goto frag_dma_map_err;
973 }
974
975 /* Complete translate all packets */
976 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
977 netdev_tx_sent_queue(dev_queue, skb->len);
978
979 wmb(); /* Commit all data before submit */
980
981 hnae_queue_xmit(ring->tqp, buf_num);
982
983 return NETDEV_TX_OK;
984
985frag_dma_map_err:
986 hns_nic_dma_unmap(ring, next_to_use_frag);
987
988head_dma_map_err:
989 hns_nic_dma_unmap(ring, next_to_use_head);
990
991out_err_tx_ok:
992 dev_kfree_skb_any(skb);
993 return NETDEV_TX_OK;
994
995out_net_tx_busy:
996 netif_stop_subqueue(netdev, ring_data->queue_index);
997 smp_mb(); /* Commit all data before submit */
998
999 return NETDEV_TX_BUSY;
1000}
1001
1002static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1003{
1004 struct hns3_nic_priv *priv = netdev_priv(netdev);
1005 struct hnae3_handle *h = priv->ae_handle;
1006 struct sockaddr *mac_addr = p;
1007 int ret;
1008
1009 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1010 return -EADDRNOTAVAIL;
1011
1012 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1013 if (ret) {
1014 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1015 return ret;
1016 }
1017
1018 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1019
1020 return 0;
1021}
1022
1023static int hns3_nic_set_features(struct net_device *netdev,
1024 netdev_features_t features)
1025{
1026 struct hns3_nic_priv *priv = netdev_priv(netdev);
1027
1028 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1029 priv->ops.fill_desc = hns3_fill_desc_tso;
1030 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1031 } else {
1032 priv->ops.fill_desc = hns3_fill_desc;
1033 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1034 }
1035
1036 netdev->features = features;
1037 return 0;
1038}
1039
1040static void
1041hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1042{
1043 struct hns3_nic_priv *priv = netdev_priv(netdev);
1044 int queue_num = priv->ae_handle->kinfo.num_tqps;
1045 struct hns3_enet_ring *ring;
1046 unsigned int start;
1047 unsigned int idx;
1048 u64 tx_bytes = 0;
1049 u64 rx_bytes = 0;
1050 u64 tx_pkts = 0;
1051 u64 rx_pkts = 0;
1052
1053 for (idx = 0; idx < queue_num; idx++) {
1054 /* fetch the tx stats */
1055 ring = priv->ring_data[idx].ring;
1056 do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
1057 tx_bytes += ring->stats.tx_bytes;
1058 tx_pkts += ring->stats.tx_pkts;
1059 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1060
1061 /* fetch the rx stats */
1062 ring = priv->ring_data[idx + queue_num].ring;
1063 do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
1064 rx_bytes += ring->stats.rx_bytes;
1065 rx_pkts += ring->stats.rx_pkts;
1066 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1067 }
1068
1069 stats->tx_bytes = tx_bytes;
1070 stats->tx_packets = tx_pkts;
1071 stats->rx_bytes = rx_bytes;
1072 stats->rx_packets = rx_pkts;
1073
1074 stats->rx_errors = netdev->stats.rx_errors;
1075 stats->multicast = netdev->stats.multicast;
1076 stats->rx_length_errors = netdev->stats.rx_length_errors;
1077 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1078 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1079
1080 stats->tx_errors = netdev->stats.tx_errors;
1081 stats->rx_dropped = netdev->stats.rx_dropped;
1082 stats->tx_dropped = netdev->stats.tx_dropped;
1083 stats->collisions = netdev->stats.collisions;
1084 stats->rx_over_errors = netdev->stats.rx_over_errors;
1085 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1086 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1087 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1088 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1089 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1090 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1091 stats->tx_window_errors = netdev->stats.tx_window_errors;
1092 stats->rx_compressed = netdev->stats.rx_compressed;
1093 stats->tx_compressed = netdev->stats.tx_compressed;
1094}
1095
1096static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1097 enum hns3_udp_tnl_type type)
1098{
1099 struct hns3_nic_priv *priv = netdev_priv(netdev);
1100 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1101 struct hnae3_handle *h = priv->ae_handle;
1102
1103 if (udp_tnl->used && udp_tnl->dst_port == port) {
1104 udp_tnl->used++;
1105 return;
1106 }
1107
1108 if (udp_tnl->used) {
1109 netdev_warn(netdev,
1110 "UDP tunnel [%d], port [%d] offload\n", type, port);
1111 return;
1112 }
1113
1114 udp_tnl->dst_port = port;
1115 udp_tnl->used = 1;
1116 /* TBD send command to hardware to add port */
1117 if (h->ae_algo->ops->add_tunnel_udp)
1118 h->ae_algo->ops->add_tunnel_udp(h, port);
1119}
1120
1121static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1122 enum hns3_udp_tnl_type type)
1123{
1124 struct hns3_nic_priv *priv = netdev_priv(netdev);
1125 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1126 struct hnae3_handle *h = priv->ae_handle;
1127
1128 if (!udp_tnl->used || udp_tnl->dst_port != port) {
1129 netdev_warn(netdev,
1130 "Invalid UDP tunnel port %d\n", port);
1131 return;
1132 }
1133
1134 udp_tnl->used--;
1135 if (udp_tnl->used)
1136 return;
1137
1138 udp_tnl->dst_port = 0;
1139 /* TBD send command to hardware to del port */
1140 if (h->ae_algo->ops->del_tunnel_udp)
1141 h->ae_algo->ops->del_tunnel_udp(h, port);
1142}
1143
1144/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
1145 * @netdev: This physical port's netdev
1146 * @ti: Tunnel information
1147 */
1148static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1149 struct udp_tunnel_info *ti)
1150{
1151 u16 port_n = ntohs(ti->port);
1152
1153 switch (ti->type) {
1154 case UDP_TUNNEL_TYPE_VXLAN:
1155 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1156 break;
1157 case UDP_TUNNEL_TYPE_GENEVE:
1158 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1159 break;
1160 default:
1161 netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1162 break;
1163 }
1164}
1165
1166static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1167 struct udp_tunnel_info *ti)
1168{
1169 u16 port_n = ntohs(ti->port);
1170
1171 switch (ti->type) {
1172 case UDP_TUNNEL_TYPE_VXLAN:
1173 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1174 break;
1175 case UDP_TUNNEL_TYPE_GENEVE:
1176 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1177 break;
1178 default:
1179 break;
1180 }
1181}
1182
1183static int hns3_setup_tc(struct net_device *netdev, u8 tc)
1184{
1185 struct hns3_nic_priv *priv = netdev_priv(netdev);
1186 struct hnae3_handle *h = priv->ae_handle;
1187 struct hnae3_knic_private_info *kinfo = &h->kinfo;
1188 unsigned int i;
1189 int ret;
1190
1191 if (tc > HNAE3_MAX_TC)
1192 return -EINVAL;
1193
1194 if (kinfo->num_tc == tc)
1195 return 0;
1196
1197 if (!netdev)
1198 return -EINVAL;
1199
1200 if (!tc) {
1201 netdev_reset_tc(netdev);
1202 return 0;
1203 }
1204
1205 /* Set num_tc for netdev */
1206 ret = netdev_set_num_tc(netdev, tc);
1207 if (ret)
1208 return ret;
1209
1210 /* Set per TC queues for the VSI */
1211 for (i = 0; i < HNAE3_MAX_TC; i++) {
1212 if (kinfo->tc_info[i].enable)
1213 netdev_set_tc_queue(netdev,
1214 kinfo->tc_info[i].tc,
1215 kinfo->tc_info[i].tqp_count,
1216 kinfo->tc_info[i].tqp_offset);
1217 }
1218
1219 return 0;
1220}
1221
1222static int hns3_nic_setup_tc(struct net_device *dev, u32 handle,
1223 u32 chain_index, __be16 protocol,
1224 struct tc_to_netdev *tc)
1225{
1226 if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
1227 return -EINVAL;
1228
1229 return hns3_setup_tc(dev, tc->mqprio->num_tc);
1230}
1231
1232static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1233 __be16 proto, u16 vid)
1234{
1235 struct hns3_nic_priv *priv = netdev_priv(netdev);
1236 struct hnae3_handle *h = priv->ae_handle;
1237 int ret = -EIO;
1238
1239 if (h->ae_algo->ops->set_vlan_filter)
1240 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1241
1242 return ret;
1243}
1244
1245static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1246 __be16 proto, u16 vid)
1247{
1248 struct hns3_nic_priv *priv = netdev_priv(netdev);
1249 struct hnae3_handle *h = priv->ae_handle;
1250 int ret = -EIO;
1251
1252 if (h->ae_algo->ops->set_vlan_filter)
1253 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1254
1255 return ret;
1256}
1257
1258static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1259 u8 qos, __be16 vlan_proto)
1260{
1261 struct hns3_nic_priv *priv = netdev_priv(netdev);
1262 struct hnae3_handle *h = priv->ae_handle;
1263 int ret = -EIO;
1264
1265 if (h->ae_algo->ops->set_vf_vlan_filter)
1266 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1267 qos, vlan_proto);
1268
1269 return ret;
1270}
1271
1272static const struct net_device_ops hns3_nic_netdev_ops = {
1273 .ndo_open = hns3_nic_net_open,
1274 .ndo_stop = hns3_nic_net_stop,
1275 .ndo_start_xmit = hns3_nic_net_xmit,
1276 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
1277 .ndo_set_features = hns3_nic_set_features,
1278 .ndo_get_stats64 = hns3_nic_get_stats64,
1279 .ndo_setup_tc = hns3_nic_setup_tc,
1280 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1281 .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
1282 .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
1283 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1284 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1285 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1286};
1287
1288/* hns3_probe - Device initialization routine
1289 * @pdev: PCI device information struct
1290 * @ent: entry in hns3_pci_tbl
1291 *
1292 * hns3_probe initializes a PF identified by a pci_dev structure.
1293 * It allocates an HNAE3 ae_dev for this PF and registers it with
1294 * the HNAE3 framework, which carries out the remaining initialization.
1295 *
1296 * Returns 0 on success, negative on failure
1297 */
1298static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1299{
1300 struct hnae3_ae_dev *ae_dev;
1301 int ret;
1302
1303 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1304 GFP_KERNEL);
1305 if (!ae_dev) {
1306 ret = -ENOMEM;
1307 return ret;
1308 }
1309
1310 ae_dev->pdev = pdev;
1311 ae_dev->dev_type = HNAE3_DEV_KNIC;
1312 pci_set_drvdata(pdev, ae_dev);
1313
1314 return hnae3_register_ae_dev(ae_dev);
1315}
1316
1317/* hns3_remove - Device removal routine
1318 * @pdev: PCI device information struct
1319 */
1320static void hns3_remove(struct pci_dev *pdev)
1321{
1322 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1323
1324 hnae3_unregister_ae_dev(ae_dev);
1325
1326 devm_kfree(&pdev->dev, ae_dev);
1327
1328 pci_set_drvdata(pdev, NULL);
1329}
1330
1331static struct pci_driver hns3_driver = {
1332 .name = hns3_driver_name,
1333 .id_table = hns3_pci_tbl,
1334 .probe = hns3_probe,
1335 .remove = hns3_remove,
1336};
1337
1338/* set default feature to hns3 */
1339static void hns3_set_default_feature(struct net_device *netdev)
1340{
1341 netdev->priv_flags |= IFF_UNICAST_FLT;
1342
1343 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1344 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1345 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1346 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1347 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1348
1349 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1350
1351 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1352
1353 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1354 NETIF_F_HW_VLAN_CTAG_FILTER |
1355 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1356 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1357 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1358 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1359
1360 netdev->vlan_features |=
1361 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1362 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1363 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1364 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1365 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1366
1367 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1368 NETIF_F_HW_VLAN_CTAG_FILTER |
1369 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1370 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1371 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1372 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1373}
1374
1375static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1376 struct hns3_desc_cb *cb)
1377{
1378 unsigned int order = hnae_page_order(ring);
1379 struct page *p;
1380
1381 p = dev_alloc_pages(order);
1382 if (!p)
1383 return -ENOMEM;
1384
1385 cb->priv = p;
1386 cb->page_offset = 0;
1387 cb->reuse_flag = 0;
1388 cb->buf = page_address(p);
1389 cb->length = hnae_page_size(ring);
1390 cb->type = DESC_TYPE_PAGE;
1391
1392 memset(cb->buf, 0, cb->length);
1393
1394 return 0;
1395}
1396
1397static void hns3_free_buffer(struct hns3_enet_ring *ring,
1398 struct hns3_desc_cb *cb)
1399{
1400 if (cb->type == DESC_TYPE_SKB)
1401 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1402 else if (!HNAE3_IS_TX_RING(ring))
1403 put_page((struct page *)cb->priv);
1404 memset(cb, 0, sizeof(*cb));
1405}
1406
1407static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1408{
1409 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1410 cb->length, ring_to_dma_dir(ring));
1411
1412 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1413 return -EIO;
1414
1415 return 0;
1416}
1417
1418static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1419 struct hns3_desc_cb *cb)
1420{
1421 if (cb->type == DESC_TYPE_SKB)
1422 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1423 ring_to_dma_dir(ring));
1424 else
1425 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1426 ring_to_dma_dir(ring));
1427}
1428
1429static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1430{
1431 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1432 ring->desc[i].addr = 0;
1433}
1434
1435static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1436{
1437 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1438
1439 if (!ring->desc_cb[i].dma)
1440 return;
1441
1442 hns3_buffer_detach(ring, i);
1443 hns3_free_buffer(ring, cb);
1444}
1445
1446static void hns3_free_buffers(struct hns3_enet_ring *ring)
1447{
1448 int i;
1449
1450 for (i = 0; i < ring->desc_num; i++)
1451 hns3_free_buffer_detach(ring, i);
1452}
1453
1454/* free desc along with its attached buffer */
1455static void hns3_free_desc(struct hns3_enet_ring *ring)
1456{
1457 hns3_free_buffers(ring);
1458
1459 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1460 ring->desc_num * sizeof(ring->desc[0]),
1461 DMA_BIDIRECTIONAL);
1462 ring->desc_dma_addr = 0;
1463 kfree(ring->desc);
1464 ring->desc = NULL;
1465}
1466
1467static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1468{
1469 int size = ring->desc_num * sizeof(ring->desc[0]);
1470
1471 ring->desc = kzalloc(size, GFP_KERNEL);
1472 if (!ring->desc)
1473 return -ENOMEM;
1474
1475 ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1476 size, DMA_BIDIRECTIONAL);
1477 if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1478 ring->desc_dma_addr = 0;
1479 kfree(ring->desc);
1480 ring->desc = NULL;
1481 return -ENOMEM;
1482 }
1483
1484 return 0;
1485}
1486
1487static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1488 struct hns3_desc_cb *cb)
1489{
1490 int ret;
1491
1492 ret = hns3_alloc_buffer(ring, cb);
1493 if (ret)
1494 goto out;
1495
1496 ret = hns3_map_buffer(ring, cb);
1497 if (ret)
1498 goto out_with_buf;
1499
1500 return 0;
1501
1502out_with_buf:
1503 hns3_free_buffers(ring);
1504out:
1505 return ret;
1506}
1507
1508static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1509{
1510 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1511
1512 if (ret)
1513 return ret;
1514
1515 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1516
1517 return 0;
1518}
1519
1520/* Allocate memory for raw pkg, and map with dma */
1521static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1522{
1523 int i, j, ret;
1524
1525 for (i = 0; i < ring->desc_num; i++) {
1526 ret = hns3_alloc_buffer_attach(ring, i);
1527 if (ret)
1528 goto out_buffer_fail;
1529 }
1530
1531 return 0;
1532
1533out_buffer_fail:
1534 for (j = i - 1; j >= 0; j--)
1535 hns3_free_buffer_detach(ring, j);
1536 return ret;
1537}
1538
1539/* detach an in-use buffer and replace it with a reserved one */
1540static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1541 struct hns3_desc_cb *res_cb)
1542{
1543 hns3_map_buffer(ring, &ring->desc_cb[i]);
1544 ring->desc_cb[i] = *res_cb;
1545 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1546}
1547
1548static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1549{
1550 ring->desc_cb[i].reuse_flag = 0;
1551 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1552 + ring->desc_cb[i].page_offset);
1553}
1554
1555static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1556 int *pkts)
1557{
1558 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1559
1560 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1561 (*bytes) += desc_cb->length;
1562 /* desc_cb will be cleaned after hns3_free_buffer_detach */
1563 hns3_free_buffer_detach(ring, ring->next_to_clean);
1564
1565 ring_ptr_move_fw(ring, next_to_clean);
1566}
1567
1568static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1569{
1570 int u = ring->next_to_use;
1571 int c = ring->next_to_clean;
1572
1573 if (unlikely(h > ring->desc_num))
1574 return 0;
1575
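/* The head reported by hardware is valid only if it lies in the
 * half-open interval (next_to_clean, next_to_use], taking ring
 * wrap-around into account.
 */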
1576 return u > c ? (h > c && h <= u) : (h > c || h <= u);
1577}
1578
1579int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1580{
1581 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1582 struct netdev_queue *dev_queue;
1583 int bytes, pkts;
1584 int head;
1585
1586 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1587 rmb(); /* Make sure head is ready before touch any data */
1588
1589 if (is_ring_empty(ring) || head == ring->next_to_clean)
1590 return 0; /* no data to poll */
1591
1592 if (!is_valid_clean_head(ring, head)) {
1593 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1594 ring->next_to_use, ring->next_to_clean);
1595
1596 u64_stats_update_begin(&ring->syncp);
1597 ring->stats.io_err_cnt++;
1598 u64_stats_update_end(&ring->syncp);
1599 return -EIO;
1600 }
1601
1602 bytes = 0;
1603 pkts = 0;
1604 while (head != ring->next_to_clean && budget) {
1605 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1606 /* Issue prefetch for next Tx descriptor */
1607 prefetch(&ring->desc_cb[ring->next_to_clean]);
1608 budget--;
1609 }
1610
1611 ring->tqp_vector->tx_group.total_bytes += bytes;
1612 ring->tqp_vector->tx_group.total_packets += pkts;
1613
1614 u64_stats_update_begin(&ring->syncp);
1615 ring->stats.tx_bytes += bytes;
1616 ring->stats.tx_pkts += pkts;
1617 u64_stats_update_end(&ring->syncp);
1618
1619 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1620 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1621
1622 if (unlikely(pkts && netif_carrier_ok(netdev) &&
1623 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1624 /* Make sure that anybody stopping the queue after this
1625 * sees the new next_to_clean.
1626 */
1627 smp_mb();
1628 if (netif_tx_queue_stopped(dev_queue)) {
1629 netif_tx_wake_queue(dev_queue);
1630 ring->stats.restart_queue++;
1631 }
1632 }
1633
1634 return !!budget;
1635}
1636
1637static int hns3_desc_unused(struct hns3_enet_ring *ring)
1638{
1639 int ntc = ring->next_to_clean;
1640 int ntu = ring->next_to_use;
1641
1642 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1643}
1644
1645static void
1646hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
1647{
1648 struct hns3_desc_cb *desc_cb;
1649 struct hns3_desc_cb res_cbs;
1650 int i, ret;
1651
1652 for (i = 0; i < cleand_count; i++) {
1653 desc_cb = &ring->desc_cb[ring->next_to_use];
1654 if (desc_cb->reuse_flag) {
1655 u64_stats_update_begin(&ring->syncp);
1656 ring->stats.reuse_pg_cnt++;
1657 u64_stats_update_end(&ring->syncp);
1658
1659 hns3_reuse_buffer(ring, ring->next_to_use);
1660 } else {
1661 ret = hns3_reserve_buffer_map(ring, &res_cbs);
1662 if (ret) {
1663 u64_stats_update_begin(&ring->syncp);
1664 ring->stats.sw_err_cnt++;
1665 u64_stats_update_end(&ring->syncp);
1666
1667 netdev_err(ring->tqp->handle->kinfo.netdev,
1668 "hnae reserve buffer map failed.\n");
1669 break;
1670 }
1671 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1672 }
1673
1674 ring_ptr_move_fw(ring, next_to_use);
1675 }
1676
 1677	wmb(); /* Make sure all descriptor updates are written before the doorbell */
1678 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1679}
1680
1681/* hns3_nic_get_headlen - determine size of header for LRO/GRO
1682 * @data: pointer to the start of the headers
 1683 * @flag: the l234info field of the Rx descriptor
 1684 * @max_size: total length of the section to find headers in
1685 * This function is meant to determine the length of headers that will
1686 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
 1687 * motivation for doing this is to only perform one pull for IPv4 TCP
1688 * packets so that we can do basic things like calculating the gso_size
1689 * based on the average data per packet.
1690 */
1691static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1692 unsigned int max_size)
1693{
1694 unsigned char *network;
1695 u8 hlen;
1696
1697 /* This should never happen, but better safe than sorry */
1698 if (max_size < ETH_HLEN)
1699 return max_size;
1700
1701 /* Initialize network frame pointer */
1702 network = data;
1703
1704 /* Set first protocol and move network header forward */
1705 network += ETH_HLEN;
1706
1707 /* Handle any vlan tag if present */
1708 if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1709 == HNS3_RX_FLAG_VLAN_PRESENT) {
1710 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1711 return max_size;
1712
1713 network += VLAN_HLEN;
1714 }
1715
1716 /* Handle L3 protocols */
1717 if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1718 == HNS3_RX_FLAG_L3ID_IPV4) {
1719 if ((typeof(max_size))(network - data) >
1720 (max_size - sizeof(struct iphdr)))
1721 return max_size;
1722
1723 /* Access ihl as a u8 to avoid unaligned access on ia64 */
1724 hlen = (network[0] & 0x0F) << 2;
1725
1726 /* Verify hlen meets minimum size requirements */
1727 if (hlen < sizeof(struct iphdr))
1728 return network - data;
1729
1730 /* Record next protocol if header is present */
1731 } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1732 == HNS3_RX_FLAG_L3ID_IPV6) {
1733 if ((typeof(max_size))(network - data) >
1734 (max_size - sizeof(struct ipv6hdr)))
1735 return max_size;
1736
1737 /* Record next protocol */
1738 hlen = sizeof(struct ipv6hdr);
1739 } else {
1740 return network - data;
1741 }
1742
1743 /* Relocate pointer to start of L4 header */
1744 network += hlen;
1745
1746 /* Finally sort out TCP/UDP */
1747 if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1748 == HNS3_RX_FLAG_L4ID_TCP) {
1749 if ((typeof(max_size))(network - data) >
1750 (max_size - sizeof(struct tcphdr)))
1751 return max_size;
1752
1753 /* Access doff as a u8 to avoid unaligned access on ia64 */
1754 hlen = (network[12] & 0xF0) >> 2;
1755
1756 /* Verify hlen meets minimum size requirements */
1757 if (hlen < sizeof(struct tcphdr))
1758 return network - data;
1759
1760 network += hlen;
1761 } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1762 == HNS3_RX_FLAG_L4ID_UDP) {
1763 if ((typeof(max_size))(network - data) >
1764 (max_size - sizeof(struct udphdr)))
1765 return max_size;
1766
1767 network += sizeof(struct udphdr);
1768 }
1769
 1770	/* If everything has gone correctly, network now points to the
 1771	 * start of the packet data, which is also the end of the headers.
 1772	 * If not, it probably points to the end of the last recognized
 1773	 * header.
 1774	 */
1775 if ((typeof(max_size))(network - data) < max_size)
1776 return network - data;
1777 else
1778 return max_size;
1779}
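/* Example: for an untagged IPv4 TCP frame with no IP or TCP options the
 * function returns 14 (Ethernet) + 20 (IPv4) + 20 (TCP) = 54 bytes, which
 * is what hns3_handle_rx_bd() below pulls into the linear part of the skb.
 */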
1780
1781static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
1782 struct hns3_enet_ring *ring, int pull_len,
1783 struct hns3_desc_cb *desc_cb)
1784{
1785 struct hns3_desc *desc;
1786 int truesize, size;
1787 int last_offset;
1788 bool twobufs;
1789
1790 twobufs = ((PAGE_SIZE < 8192) &&
1791 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
1792
1793 desc = &ring->desc[ring->next_to_clean];
1794 size = le16_to_cpu(desc->rx.size);
1795
1796 if (twobufs) {
1797 truesize = hnae_buf_size(ring);
1798 } else {
1799 truesize = ALIGN(size, L1_CACHE_BYTES);
1800 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
1801 }
1802
1803 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
1804 size - pull_len, truesize - pull_len);
1805
 1806	/* Avoid reusing pages from a remote NUMA node; reuse_flag defaults to no reuse */
1807 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
1808 return;
1809
1810 if (twobufs) {
1811 /* If we are only owner of page we can reuse it */
1812 if (likely(page_count(desc_cb->priv) == 1)) {
1813 /* Flip page offset to other buffer */
1814 desc_cb->page_offset ^= truesize;
1815
1816 desc_cb->reuse_flag = 1;
1817 /* bump ref count on page before it is given*/
1818 get_page(desc_cb->priv);
1819 }
1820 return;
1821 }
1822
1823 /* Move offset up to the next cache line */
1824 desc_cb->page_offset += truesize;
1825
1826 if (desc_cb->page_offset <= last_offset) {
1827 desc_cb->reuse_flag = 1;
1828 /* Bump ref count on page before it is given*/
1829 get_page(desc_cb->priv);
1830 }
1831}
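/* In the twobufs case above a page is split into two 2048-byte buffers and
 * the XOR flips page_offset between the two halves; otherwise the offset
 * simply advances through the page until last_offset is passed.
 */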
1832
1833static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
1834 struct hns3_desc *desc)
1835{
1836 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1837 int l3_type, l4_type;
1838 u32 bd_base_info;
1839 int ol4_type;
1840 u32 l234info;
1841
1842 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1843 l234info = le32_to_cpu(desc->rx.l234_info);
1844
1845 skb->ip_summed = CHECKSUM_NONE;
1846
1847 skb_checksum_none_assert(skb);
1848
1849 if (!(netdev->features & NETIF_F_RXCSUM))
1850 return;
1851
1852 /* check if hardware has done checksum */
1853 if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
1854 return;
1855
1856 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
1857 hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
1858 hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
1859 hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
1860 netdev_err(netdev, "L3/L4 error pkt\n");
1861 u64_stats_update_begin(&ring->syncp);
1862 ring->stats.l3l4_csum_err++;
1863 u64_stats_update_end(&ring->syncp);
1864
1865 return;
1866 }
1867
1868 l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
1869 HNS3_RXD_L3ID_S);
1870 l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
1871 HNS3_RXD_L4ID_S);
1872
1873 ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1874 switch (ol4_type) {
1875 case HNS3_OL4_TYPE_MAC_IN_UDP:
1876 case HNS3_OL4_TYPE_NVGRE:
1877 skb->csum_level = 1;
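		/* fall through */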
1878 case HNS3_OL4_TYPE_NO_TUN:
1879 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
1880 if (l3_type == HNS3_L3_TYPE_IPV4 ||
1881 (l3_type == HNS3_L3_TYPE_IPV6 &&
1882 (l4_type == HNS3_L4_TYPE_UDP ||
1883 l4_type == HNS3_L4_TYPE_TCP ||
1884 l4_type == HNS3_L4_TYPE_SCTP)))
1885 skb->ip_summed = CHECKSUM_UNNECESSARY;
1886 break;
1887 }
1888}
1889
1890static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
1891 struct sk_buff **out_skb, int *out_bnum)
1892{
1893 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1894 struct hns3_desc_cb *desc_cb;
1895 struct hns3_desc *desc;
1896 struct sk_buff *skb;
1897 unsigned char *va;
1898 u32 bd_base_info;
1899 int pull_len;
1900 u32 l234info;
1901 int length;
1902 int bnum;
1903
1904 desc = &ring->desc[ring->next_to_clean];
1905 desc_cb = &ring->desc_cb[ring->next_to_clean];
1906
1907 prefetch(desc);
1908
1909 length = le16_to_cpu(desc->rx.pkt_len);
1910 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1911 l234info = le32_to_cpu(desc->rx.l234_info);
1912
1913 /* Check valid BD */
1914 if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
1915 return -EFAULT;
1916
1917 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
1918
1919 /* Prefetch first cache line of first page
 1920	 * The idea is to cache the first few bytes of the packet header.
 1921	 * With a 64B L1 cache line we need to prefetch twice to cover 128B,
 1922	 * but some parts have larger 128B L1 cache lines, in which case a
 1923	 * single prefetch is enough to bring in the relevant part of the
 1924	 * header.
1925 */
1926 prefetch(va);
1927#if L1_CACHE_BYTES < 128
1928 prefetch(va + L1_CACHE_BYTES);
1929#endif
1930
1931 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
1932 HNS3_RX_HEAD_SIZE);
1933 if (unlikely(!skb)) {
1934 netdev_err(netdev, "alloc rx skb fail\n");
1935
1936 u64_stats_update_begin(&ring->syncp);
1937 ring->stats.sw_err_cnt++;
1938 u64_stats_update_end(&ring->syncp);
1939
1940 return -ENOMEM;
1941 }
1942
1943 prefetchw(skb->data);
1944
1945 bnum = 1;
1946 if (length <= HNS3_RX_HEAD_SIZE) {
1947 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
1948
 1949		/* We can reuse the buffer as-is, just make sure it is on the local NUMA node */
1950 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
1951 desc_cb->reuse_flag = 1;
1952 else /* This page cannot be reused so discard it */
1953 put_page(desc_cb->priv);
1954
1955 ring_ptr_move_fw(ring, next_to_clean);
1956 } else {
1957 u64_stats_update_begin(&ring->syncp);
1958 ring->stats.seg_pkt_cnt++;
1959 u64_stats_update_end(&ring->syncp);
1960
1961 pull_len = hns3_nic_get_headlen(va, l234info,
1962 HNS3_RX_HEAD_SIZE);
1963 memcpy(__skb_put(skb, pull_len), va,
1964 ALIGN(pull_len, sizeof(long)));
1965
1966 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
1967 ring_ptr_move_fw(ring, next_to_clean);
1968
1969 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
1970 desc = &ring->desc[ring->next_to_clean];
1971 desc_cb = &ring->desc_cb[ring->next_to_clean];
1972 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1973 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
1974 ring_ptr_move_fw(ring, next_to_clean);
1975 bnum++;
1976 }
1977 }
1978
1979 *out_bnum = bnum;
1980
1981 if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
1982 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
1983 ((u64 *)desc)[0], ((u64 *)desc)[1]);
1984 u64_stats_update_begin(&ring->syncp);
1985 ring->stats.non_vld_descs++;
1986 u64_stats_update_end(&ring->syncp);
1987
1988 dev_kfree_skb_any(skb);
1989 return -EINVAL;
1990 }
1991
1992 if (unlikely((!desc->rx.pkt_len) ||
1993 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
1994 netdev_err(netdev, "truncated pkt\n");
1995 u64_stats_update_begin(&ring->syncp);
1996 ring->stats.err_pkt_len++;
1997 u64_stats_update_end(&ring->syncp);
1998
1999 dev_kfree_skb_any(skb);
2000 return -EFAULT;
2001 }
2002
2003 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2004 netdev_err(netdev, "L2 error pkt\n");
2005 u64_stats_update_begin(&ring->syncp);
2006 ring->stats.l2_err++;
2007 u64_stats_update_end(&ring->syncp);
2008
2009 dev_kfree_skb_any(skb);
2010 return -EFAULT;
2011 }
2012
2013 u64_stats_update_begin(&ring->syncp);
2014 ring->stats.rx_pkts++;
2015 ring->stats.rx_bytes += skb->len;
2016 u64_stats_update_end(&ring->syncp);
2017
2018 ring->tqp_vector->rx_group.total_bytes += skb->len;
2019
2020 hns3_rx_checksum(ring, skb, desc);
2021 return 0;
2022}
2023
2024static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget)
2025{
2026#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2027 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2028 int recv_pkts, recv_bds, clean_count, err;
2029 int unused_count = hns3_desc_unused(ring);
2030 struct sk_buff *skb = NULL;
2031 int num, bnum = 0;
2032
2033 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
 2034	rmb(); /* Make sure num is read before other descriptor data is touched */
2035
2036 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2037 num -= unused_count;
2038
2039 while (recv_pkts < budget && recv_bds < num) {
2040 /* Reuse or realloc buffers */
2041 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2042 hns3_nic_alloc_rx_buffers(ring,
2043 clean_count + unused_count);
2044 clean_count = 0;
2045 unused_count = hns3_desc_unused(ring);
2046 }
2047
2048 /* Poll one pkt */
2049 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2050 if (unlikely(!skb)) /* This fault cannot be repaired */
2051 goto out;
2052
2053 recv_bds += bnum;
2054 clean_count += bnum;
 2055		if (unlikely(err)) { /* Skip this erroneous packet */
2056 recv_pkts++;
2057 continue;
2058 }
2059
 2060		/* Hand the packet up to the IP stack */
2061 skb->protocol = eth_type_trans(skb, netdev);
2062 (void)napi_gro_receive(&ring->tqp_vector->napi, skb);
2063
2064 recv_pkts++;
2065 }
2066
2067out:
 2068	/* Replenish the Rx buffers consumed in this poll */
2069 if (clean_count + unused_count > 0)
2070 hns3_nic_alloc_rx_buffers(ring,
2071 clean_count + unused_count);
2072
2073 return recv_pkts;
2074}
2075
2076static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2077{
2078#define HNS3_RX_ULTRA_PACKET_RATE 40000
2079 enum hns3_flow_level_range new_flow_level;
2080 struct hns3_enet_tqp_vector *tqp_vector;
2081 int packets_per_secs;
2082 int bytes_per_usecs;
2083 u16 new_int_gl;
2084 int usecs;
2085
2086 if (!ring_group->int_gl)
2087 return false;
2088
2089 if (ring_group->total_packets == 0) {
2090 ring_group->int_gl = HNS3_INT_GL_50K;
2091 ring_group->flow_level = HNS3_FLOW_LOW;
2092 return true;
2093 }
2094
 2095	/* Simple throttle rate management
2096 * 0-10MB/s lower (50000 ints/s)
2097 * 10-20MB/s middle (20000 ints/s)
2098 * 20-1249MB/s high (18000 ints/s)
2099 * > 40000pps ultra (8000 ints/s)
2100 */
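	/* The GL value appears to be in units of 2 microseconds (see the
	 * "usecs = int_gl << 1" calculation below): e.g. HNS3_INT_GL_50K =
	 * 0x0A gives a 20us gap, roughly 50000 interrupts per second, and
	 * HNS3_INT_GL_8K = 0x3E gives a 124us gap, roughly 8000 per second.
	 */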
2101 new_flow_level = ring_group->flow_level;
2102 new_int_gl = ring_group->int_gl;
2103 tqp_vector = ring_group->ring->tqp_vector;
2104 usecs = (ring_group->int_gl << 1);
2105 bytes_per_usecs = ring_group->total_bytes / usecs;
 2106	/* 1000000 microseconds in one second */
2107 packets_per_secs = ring_group->total_packets * 1000000 / usecs;
2108
2109 switch (new_flow_level) {
2110 case HNS3_FLOW_LOW:
2111 if (bytes_per_usecs > 10)
2112 new_flow_level = HNS3_FLOW_MID;
2113 break;
2114 case HNS3_FLOW_MID:
2115 if (bytes_per_usecs > 20)
2116 new_flow_level = HNS3_FLOW_HIGH;
2117 else if (bytes_per_usecs <= 10)
2118 new_flow_level = HNS3_FLOW_LOW;
2119 break;
2120 case HNS3_FLOW_HIGH:
2121 case HNS3_FLOW_ULTRA:
2122 default:
2123 if (bytes_per_usecs <= 20)
2124 new_flow_level = HNS3_FLOW_MID;
2125 break;
2126 }
2127
2128 if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2129 (&tqp_vector->rx_group == ring_group))
2130 new_flow_level = HNS3_FLOW_ULTRA;
2131
2132 switch (new_flow_level) {
2133 case HNS3_FLOW_LOW:
2134 new_int_gl = HNS3_INT_GL_50K;
2135 break;
2136 case HNS3_FLOW_MID:
2137 new_int_gl = HNS3_INT_GL_20K;
2138 break;
2139 case HNS3_FLOW_HIGH:
2140 new_int_gl = HNS3_INT_GL_18K;
2141 break;
2142 case HNS3_FLOW_ULTRA:
2143 new_int_gl = HNS3_INT_GL_8K;
2144 break;
2145 default:
2146 break;
2147 }
2148
2149 ring_group->total_bytes = 0;
2150 ring_group->total_packets = 0;
2151 ring_group->flow_level = new_flow_level;
2152 if (new_int_gl != ring_group->int_gl) {
2153 ring_group->int_gl = new_int_gl;
2154 return true;
2155 }
2156 return false;
2157}
2158
2159static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2160{
2161 u16 rx_int_gl, tx_int_gl;
2162 bool rx, tx;
2163
2164 rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
2165 tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
2166 rx_int_gl = tqp_vector->rx_group.int_gl;
2167 tx_int_gl = tqp_vector->tx_group.int_gl;
2168 if (rx && tx) {
2169 if (rx_int_gl > tx_int_gl) {
2170 tqp_vector->tx_group.int_gl = rx_int_gl;
2171 tqp_vector->tx_group.flow_level =
2172 tqp_vector->rx_group.flow_level;
2173 hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
2174 } else {
2175 tqp_vector->rx_group.int_gl = tx_int_gl;
2176 tqp_vector->rx_group.flow_level =
2177 tqp_vector->tx_group.flow_level;
2178 hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
2179 }
2180 }
2181}
2182
2183static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2184{
2185 struct hns3_enet_ring *ring;
2186 int rx_pkt_total = 0;
2187
2188 struct hns3_enet_tqp_vector *tqp_vector =
2189 container_of(napi, struct hns3_enet_tqp_vector, napi);
2190 bool clean_complete = true;
2191 int rx_budget;
2192
2193 /* Since the actual Tx work is minimal, we can give the Tx a larger
2194 * budget and be more aggressive about cleaning up the Tx descriptors.
2195 */
2196 hns3_for_each_ring(ring, tqp_vector->tx_group) {
2197 if (!hns3_clean_tx_ring(ring, budget))
2198 clean_complete = false;
2199 }
2200
 2201	/* Make sure the Rx ring budget is at least 1 */
2202 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2203
2204 hns3_for_each_ring(ring, tqp_vector->rx_group) {
2205 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget);
2206
2207 if (rx_cleaned >= rx_budget)
2208 clean_complete = false;
2209
2210 rx_pkt_total += rx_cleaned;
2211 }
2212
2213 tqp_vector->rx_group.total_packets += rx_pkt_total;
2214
2215 if (!clean_complete)
2216 return budget;
2217
2218 napi_complete(napi);
2219 hns3_update_new_int_gl(tqp_vector);
2220 hns3_mask_vector_irq(tqp_vector, 1);
2221
2222 return rx_pkt_total;
2223}
2224
2225static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2226 struct hnae3_ring_chain_node *head)
2227{
2228 struct pci_dev *pdev = tqp_vector->handle->pdev;
2229 struct hnae3_ring_chain_node *cur_chain = head;
2230 struct hnae3_ring_chain_node *chain;
2231 struct hns3_enet_ring *tx_ring;
2232 struct hns3_enet_ring *rx_ring;
2233
2234 tx_ring = tqp_vector->tx_group.ring;
2235 if (tx_ring) {
2236 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2237 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2238 HNAE3_RING_TYPE_TX);
2239
2240 cur_chain->next = NULL;
2241
2242 while (tx_ring->next) {
2243 tx_ring = tx_ring->next;
2244
2245 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2246 GFP_KERNEL);
2247 if (!chain)
2248 return -ENOMEM;
2249
2250 cur_chain->next = chain;
2251 chain->tqp_index = tx_ring->tqp->tqp_index;
2252 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2253 HNAE3_RING_TYPE_TX);
2254
2255 cur_chain = chain;
2256 }
2257 }
2258
2259 rx_ring = tqp_vector->rx_group.ring;
2260 if (!tx_ring && rx_ring) {
2261 cur_chain->next = NULL;
2262 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2263 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2264 HNAE3_RING_TYPE_RX);
2265
2266 rx_ring = rx_ring->next;
2267 }
2268
2269 while (rx_ring) {
2270 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2271 if (!chain)
2272 return -ENOMEM;
2273
2274 cur_chain->next = chain;
2275 chain->tqp_index = rx_ring->tqp->tqp_index;
2276 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2277 HNAE3_RING_TYPE_RX);
2278 cur_chain = chain;
2279
2280 rx_ring = rx_ring->next;
2281 }
2282
2283 return 0;
2284}
2285
2286static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2287 struct hnae3_ring_chain_node *head)
2288{
2289 struct pci_dev *pdev = tqp_vector->handle->pdev;
2290 struct hnae3_ring_chain_node *chain_tmp, *chain;
2291
2292 chain = head->next;
2293
2294 while (chain) {
2295 chain_tmp = chain->next;
2296 devm_kfree(&pdev->dev, chain);
2297 chain = chain_tmp;
2298 }
2299}
2300
2301static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2302 struct hns3_enet_ring *ring)
2303{
2304 ring->next = group->ring;
2305 group->ring = ring;
2306
2307 group->count++;
2308}
2309
2310static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2311{
2312 struct hnae3_ring_chain_node vector_ring_chain;
2313 struct hnae3_handle *h = priv->ae_handle;
2314 struct hns3_enet_tqp_vector *tqp_vector;
2315 struct hnae3_vector_info *vector;
2316 struct pci_dev *pdev = h->pdev;
2317 u16 tqp_num = h->kinfo.num_tqps;
2318 u16 vector_num;
2319 int ret = 0;
2320 u16 i;
2321
 2322	/* RSS size, the number of online CPUs and vector_num should match */
 2323	/* Should consider 2P/4P (multi-socket) systems later */
2324 vector_num = min_t(u16, num_online_cpus(), tqp_num);
2325 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2326 GFP_KERNEL);
2327 if (!vector)
2328 return -ENOMEM;
2329
2330 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2331
2332 priv->vector_num = vector_num;
2333 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2334 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2335 GFP_KERNEL);
2336 if (!priv->tqp_vector)
2337 return -ENOMEM;
2338
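	/* Queues are distributed round-robin over the vectors: e.g. with
	 * 16 TQPs and 4 vectors, queues 0, 4, 8 and 12 share vector 0,
	 * since vector_i = i % vector_num below.
	 */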
2339 for (i = 0; i < tqp_num; i++) {
2340 u16 vector_i = i % vector_num;
2341
2342 tqp_vector = &priv->tqp_vector[vector_i];
2343
2344 hns3_add_ring_to_group(&tqp_vector->tx_group,
2345 priv->ring_data[i].ring);
2346
2347 hns3_add_ring_to_group(&tqp_vector->rx_group,
2348 priv->ring_data[i + tqp_num].ring);
2349
2350 tqp_vector->idx = vector_i;
2351 tqp_vector->mask_addr = vector[vector_i].io_addr;
2352 tqp_vector->vector_irq = vector[vector_i].vector;
2353 tqp_vector->num_tqps++;
2354
2355 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2356 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2357 }
2358
2359 for (i = 0; i < vector_num; i++) {
2360 tqp_vector = &priv->tqp_vector[i];
2361
2362 tqp_vector->rx_group.total_bytes = 0;
2363 tqp_vector->rx_group.total_packets = 0;
2364 tqp_vector->tx_group.total_bytes = 0;
2365 tqp_vector->tx_group.total_packets = 0;
2366 hns3_vector_gl_rl_init(tqp_vector);
2367 tqp_vector->handle = h;
2368
2369 ret = hns3_get_vector_ring_chain(tqp_vector,
2370 &vector_ring_chain);
2371 if (ret)
2372 goto out;
2373
2374 ret = h->ae_algo->ops->map_ring_to_vector(h,
2375 tqp_vector->vector_irq, &vector_ring_chain);
2376 if (ret)
2377 goto out;
2378
2379 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2380
2381 netif_napi_add(priv->netdev, &tqp_vector->napi,
2382 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2383 }
2384
2385out:
2386 devm_kfree(&pdev->dev, vector);
2387 return ret;
2388}
2389
2390static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2391{
2392 struct hnae3_ring_chain_node vector_ring_chain;
2393 struct hnae3_handle *h = priv->ae_handle;
2394 struct hns3_enet_tqp_vector *tqp_vector;
2395 struct pci_dev *pdev = h->pdev;
2396 int i, ret;
2397
2398 for (i = 0; i < priv->vector_num; i++) {
2399 tqp_vector = &priv->tqp_vector[i];
2400
2401 ret = hns3_get_vector_ring_chain(tqp_vector,
2402 &vector_ring_chain);
2403 if (ret)
2404 return ret;
2405
2406 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2407 tqp_vector->vector_irq, &vector_ring_chain);
2408 if (ret)
2409 return ret;
2410
2411 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2412
2413 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2414 (void)irq_set_affinity_hint(
2415 priv->tqp_vector[i].vector_irq,
2416 NULL);
2417 devm_free_irq(&pdev->dev,
2418 priv->tqp_vector[i].vector_irq,
2419 &priv->tqp_vector[i]);
2420 }
2421
2422 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2423
2424 netif_napi_del(&priv->tqp_vector[i].napi);
2425 }
2426
2427 devm_kfree(&pdev->dev, priv->tqp_vector);
2428
2429 return 0;
2430}
2431
2432static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2433 int ring_type)
2434{
2435 struct hns3_nic_ring_data *ring_data = priv->ring_data;
2436 int queue_num = priv->ae_handle->kinfo.num_tqps;
2437 struct pci_dev *pdev = priv->ae_handle->pdev;
2438 struct hns3_enet_ring *ring;
2439
2440 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2441 if (!ring)
2442 return -ENOMEM;
2443
2444 if (ring_type == HNAE3_RING_TYPE_TX) {
2445 ring_data[q->tqp_index].ring = ring;
2446 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2447 } else {
2448 ring_data[q->tqp_index + queue_num].ring = ring;
2449 ring->io_base = q->io_base;
2450 }
2451
2452 hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2453
2454 ring_data[q->tqp_index].queue_index = q->tqp_index;
2455
2456 ring->tqp = q;
2457 ring->desc = NULL;
2458 ring->desc_cb = NULL;
2459 ring->dev = priv->dev;
2460 ring->desc_dma_addr = 0;
2461 ring->buf_size = q->buf_size;
2462 ring->desc_num = q->desc_num;
2463 ring->next_to_use = 0;
2464 ring->next_to_clean = 0;
2465
2466 return 0;
2467}
2468
2469static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2470 struct hns3_nic_priv *priv)
2471{
2472 int ret;
2473
2474 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2475 if (ret)
2476 return ret;
2477
2478 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2479 if (ret)
2480 return ret;
2481
2482 return 0;
2483}
2484
2485static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2486{
2487 struct hnae3_handle *h = priv->ae_handle;
2488 struct pci_dev *pdev = h->pdev;
2489 int i, ret;
2490
2491 priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2492 sizeof(*priv->ring_data) * 2,
2493 GFP_KERNEL);
2494 if (!priv->ring_data)
2495 return -ENOMEM;
2496
2497 for (i = 0; i < h->kinfo.num_tqps; i++) {
2498 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2499 if (ret)
2500 goto err;
2501 }
2502
2503 return 0;
2504err:
2505 devm_kfree(&pdev->dev, priv->ring_data);
2506 return ret;
2507}
2508
2509static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2510{
2511 int ret;
2512
2513 if (ring->desc_num <= 0 || ring->buf_size <= 0)
2514 return -EINVAL;
2515
2516 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2517 GFP_KERNEL);
2518 if (!ring->desc_cb) {
2519 ret = -ENOMEM;
2520 goto out;
2521 }
2522
2523 ret = hns3_alloc_desc(ring);
2524 if (ret)
2525 goto out_with_desc_cb;
2526
2527 if (!HNAE3_IS_TX_RING(ring)) {
2528 ret = hns3_alloc_ring_buffers(ring);
2529 if (ret)
2530 goto out_with_desc;
2531 }
2532
2533 return 0;
2534
2535out_with_desc:
2536 hns3_free_desc(ring);
2537out_with_desc_cb:
2538 kfree(ring->desc_cb);
2539 ring->desc_cb = NULL;
2540out:
2541 return ret;
2542}
2543
2544static void hns3_fini_ring(struct hns3_enet_ring *ring)
2545{
2546 hns3_free_desc(ring);
2547 kfree(ring->desc_cb);
2548 ring->desc_cb = NULL;
2549 ring->next_to_clean = 0;
2550 ring->next_to_use = 0;
2551}
2552
2553int hns3_buf_size2type(u32 buf_size)
2554{
2555 int bd_size_type;
2556
2557 switch (buf_size) {
2558 case 512:
2559 bd_size_type = HNS3_BD_SIZE_512_TYPE;
2560 break;
2561 case 1024:
2562 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2563 break;
2564 case 2048:
2565 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2566 break;
2567 case 4096:
2568 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2569 break;
2570 default:
2571 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2572 }
2573
2574 return bd_size_type;
2575}
2576
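/* Note: the high half of the descriptor base address is written as
 * ((dma >> 31) >> 1) rather than (dma >> 32), presumably so the shift is
 * well defined even when dma_addr_t is only 32 bits wide.
 */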
2577static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2578{
2579 dma_addr_t dma = ring->desc_dma_addr;
2580 struct hnae3_queue *q = ring->tqp;
2581
2582 if (!HNAE3_IS_TX_RING(ring)) {
2583 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2584 (u32)dma);
2585 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2586 (u32)((dma >> 31) >> 1));
2587
2588 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2589 hns3_buf_size2type(ring->buf_size));
2590 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2591 ring->desc_num / 8 - 1);
2592
2593 } else {
2594 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2595 (u32)dma);
2596 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2597 (u32)((dma >> 31) >> 1));
2598
2599 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2600 hns3_buf_size2type(ring->buf_size));
2601 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2602 ring->desc_num / 8 - 1);
2603 }
2604}
2605
2606static int hns3_init_all_ring(struct hns3_nic_priv *priv)
2607{
2608 struct hnae3_handle *h = priv->ae_handle;
2609 int ring_num = h->kinfo.num_tqps * 2;
2610 int i, j;
2611 int ret;
2612
2613 for (i = 0; i < ring_num; i++) {
2614 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2615 if (ret) {
2616 dev_err(priv->dev,
2617 "Alloc ring memory fail! ret=%d\n", ret);
2618 goto out_when_alloc_ring_memory;
2619 }
2620
2621 hns3_init_ring_hw(priv->ring_data[i].ring);
2622
2623 u64_stats_init(&priv->ring_data[i].ring->syncp);
2624 }
2625
2626 return 0;
2627
2628out_when_alloc_ring_memory:
2629 for (j = i - 1; j >= 0; j--)
 2630		hns3_fini_ring(priv->ring_data[j].ring);
2631
2632 return -ENOMEM;
2633}
2634
2635static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2636{
2637 struct hnae3_handle *h = priv->ae_handle;
2638 int i;
2639
2640 for (i = 0; i < h->kinfo.num_tqps; i++) {
2641 if (h->ae_algo->ops->reset_queue)
2642 h->ae_algo->ops->reset_queue(h, i);
2643
2644 hns3_fini_ring(priv->ring_data[i].ring);
2645 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2646 }
2647
2648 return 0;
2649}
2650
2651/* Set the MAC address if it is configured, otherwise leave it to the AE driver */
2652static void hns3_init_mac_addr(struct net_device *netdev)
2653{
2654 struct hns3_nic_priv *priv = netdev_priv(netdev);
2655 struct hnae3_handle *h = priv->ae_handle;
2656 u8 mac_addr_temp[ETH_ALEN];
2657
2658 if (h->ae_algo->ops->get_mac_addr) {
2659 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
2660 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
2661 }
2662
2663 /* Check if the MAC address is valid, if not get a random one */
2664 if (!is_valid_ether_addr(netdev->dev_addr)) {
2665 eth_hw_addr_random(netdev);
2666 dev_warn(priv->dev, "using random MAC address %pM\n",
2667 netdev->dev_addr);
2668 /* Also copy this new MAC address into hdev */
2669 if (h->ae_algo->ops->set_mac_addr)
2670 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
2671 }
2672}
2673
2674static void hns3_nic_set_priv_ops(struct net_device *netdev)
2675{
2676 struct hns3_nic_priv *priv = netdev_priv(netdev);
2677
2678 if ((netdev->features & NETIF_F_TSO) ||
2679 (netdev->features & NETIF_F_TSO6)) {
2680 priv->ops.fill_desc = hns3_fill_desc_tso;
2681 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
2682 } else {
2683 priv->ops.fill_desc = hns3_fill_desc;
2684 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
2685 }
2686}
2687
2688static int hns3_client_init(struct hnae3_handle *handle)
2689{
2690 struct pci_dev *pdev = handle->pdev;
2691 struct hns3_nic_priv *priv;
2692 struct net_device *netdev;
2693 int ret;
2694
2695 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
2696 handle->kinfo.num_tqps);
2697 if (!netdev)
2698 return -ENOMEM;
2699
2700 priv = netdev_priv(netdev);
2701 priv->dev = &pdev->dev;
2702 priv->netdev = netdev;
2703 priv->ae_handle = handle;
2704
2705 handle->kinfo.netdev = netdev;
2706 handle->priv = (void *)priv;
2707
2708 hns3_init_mac_addr(netdev);
2709
2710 hns3_set_default_feature(netdev);
2711
2712 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
2713 netdev->priv_flags |= IFF_UNICAST_FLT;
2714 netdev->netdev_ops = &hns3_nic_netdev_ops;
2715 SET_NETDEV_DEV(netdev, &pdev->dev);
2716 hns3_ethtool_set_ops(netdev);
2717 hns3_nic_set_priv_ops(netdev);
2718
2719 /* Carrier off reporting is important to ethtool even BEFORE open */
2720 netif_carrier_off(netdev);
2721
2722 ret = hns3_get_ring_config(priv);
2723 if (ret) {
2724 ret = -ENOMEM;
2725 goto out_get_ring_cfg;
2726 }
2727
2728 ret = hns3_nic_init_vector_data(priv);
2729 if (ret) {
2730 ret = -ENOMEM;
2731 goto out_init_vector_data;
2732 }
2733
2734 ret = hns3_init_all_ring(priv);
2735 if (ret) {
2736 ret = -ENOMEM;
2737 goto out_init_ring_data;
2738 }
2739
2740 ret = register_netdev(netdev);
2741 if (ret) {
2742 dev_err(priv->dev, "probe register netdev fail!\n");
2743 goto out_reg_netdev_fail;
2744 }
2745
2746 return ret;
2747
2748out_reg_netdev_fail:
2749out_init_ring_data:
2750 (void)hns3_nic_uninit_vector_data(priv);
2751 priv->ring_data = NULL;
2752out_init_vector_data:
2753out_get_ring_cfg:
2754 priv->ae_handle = NULL;
2755 free_netdev(netdev);
2756 return ret;
2757}
2758
2759static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
2760{
2761 struct net_device *netdev = handle->kinfo.netdev;
2762 struct hns3_nic_priv *priv = netdev_priv(netdev);
2763 int ret;
2764
2765 if (netdev->reg_state != NETREG_UNINITIALIZED)
2766 unregister_netdev(netdev);
2767
2768 ret = hns3_nic_uninit_vector_data(priv);
2769 if (ret)
2770 netdev_err(netdev, "uninit vector error\n");
2771
2772 ret = hns3_uninit_all_ring(priv);
2773 if (ret)
2774 netdev_err(netdev, "uninit ring error\n");
2775
2776 priv->ring_data = NULL;
2777
2778 free_netdev(netdev);
2779}
2780
2781static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
2782{
2783 struct net_device *netdev = handle->kinfo.netdev;
2784
2785 if (!netdev)
2786 return;
2787
2788 if (linkup) {
2789 netif_carrier_on(netdev);
2790 netif_tx_wake_all_queues(netdev);
2791 netdev_info(netdev, "link up\n");
2792 } else {
2793 netif_carrier_off(netdev);
2794 netif_tx_stop_all_queues(netdev);
2795 netdev_info(netdev, "link down\n");
2796 }
2797}
2798
2799const struct hnae3_client_ops client_ops = {
2800 .init_instance = hns3_client_init,
2801 .uninit_instance = hns3_client_uninit,
2802 .link_status_change = hns3_link_status_change,
2803};
2804
2805/* hns3_init_module - Driver registration routine
2806 * hns3_init_module is the first routine called when the driver is
 2807 * loaded. It registers the HNAE3 client and then the PCI driver.
2808 */
2809static int __init hns3_init_module(void)
2810{
2811 int ret;
2812
2813 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
2814 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
2815
2816 client.type = HNAE3_CLIENT_KNIC;
2817 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
2818 hns3_driver_name);
2819
2820 client.ops = &client_ops;
2821
2822 ret = hnae3_register_client(&client);
2823 if (ret)
2824 return ret;
2825
2826 ret = pci_register_driver(&hns3_driver);
2827 if (ret)
2828 hnae3_unregister_client(&client);
2829
2830 return ret;
2831}
2832module_init(hns3_init_module);
2833
2834/* hns3_exit_module - Driver exit cleanup routine
2835 * hns3_exit_module is called just before the driver is removed
2836 * from memory.
2837 */
2838static void __exit hns3_exit_module(void)
2839{
2840 pci_unregister_driver(&hns3_driver);
2841 hnae3_unregister_client(&client);
2842}
2843module_exit(hns3_exit_module);
2844
2845MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
2846MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2847MODULE_LICENSE("GPL");
2848MODULE_ALIAS("pci:hns-nic");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
new file mode 100644
index 000000000000..a6e8f15a4669
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
@@ -0,0 +1,592 @@
1/*
2 * Copyright (c) 2016 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#ifndef __HNS3_ENET_H
11#define __HNS3_ENET_H
12
13#include "hnae3.h"
14
15extern const char hns3_driver_version[];
16
17enum hns3_nic_state {
18 HNS3_NIC_STATE_TESTING,
19 HNS3_NIC_STATE_RESETTING,
20 HNS3_NIC_STATE_REINITING,
21 HNS3_NIC_STATE_DOWN,
22 HNS3_NIC_STATE_DISABLED,
23 HNS3_NIC_STATE_REMOVING,
24 HNS3_NIC_STATE_SERVICE_INITED,
25 HNS3_NIC_STATE_SERVICE_SCHED,
26 HNS3_NIC_STATE2_RESET_REQUESTED,
27 HNS3_NIC_STATE_MAX
28};
29
30#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000
31#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004
32#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008
33#define HNS3_RING_RX_RING_BD_LEN_REG 0x0000C
34#define HNS3_RING_RX_RING_TAIL_REG 0x00018
35#define HNS3_RING_RX_RING_HEAD_REG 0x0001C
36#define HNS3_RING_RX_RING_FBDNUM_REG 0x00020
37#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C
38
39#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040
40#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044
41#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048
42#define HNS3_RING_TX_RING_BD_LEN_REG 0x0004C
43#define HNS3_RING_TX_RING_TAIL_REG 0x00058
44#define HNS3_RING_TX_RING_HEAD_REG 0x0005C
45#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060
46#define HNS3_RING_TX_RING_OFFSET_REG 0x00064
47#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
48
49#define HNS3_RING_PREFETCH_EN_REG 0x0007C
50#define HNS3_RING_CFG_VF_NUM_REG 0x00080
51#define HNS3_RING_ASID_REG 0x0008C
52#define HNS3_RING_RX_VM_REG 0x00090
53#define HNS3_RING_T0_BE_RST 0x00094
54#define HNS3_RING_COULD_BE_RST 0x00098
55#define HNS3_RING_WRR_WEIGHT_REG 0x0009c
56
57#define HNS3_RING_INTMSK_RXWL_REG 0x000A0
58#define HNS3_RING_INTSTS_RX_RING_REG 0x000A4
59#define HNS3_RX_RING_INT_STS_REG 0x000A8
60#define HNS3_RING_INTMSK_TXWL_REG 0x000AC
61#define HNS3_RING_INTSTS_TX_RING_REG 0x000B0
62#define HNS3_TX_RING_INT_STS_REG 0x000B4
63#define HNS3_RING_INTMSK_RX_OVERTIME_REG 0x000B8
64#define HNS3_RING_INTSTS_RX_OVERTIME_REG 0x000BC
65#define HNS3_RING_INTMSK_TX_OVERTIME_REG 0x000C4
66#define HNS3_RING_INTSTS_TX_OVERTIME_REG 0x000C8
67
68#define HNS3_RING_MB_CTRL_REG 0x00100
69#define HNS3_RING_MB_DATA_BASE_REG 0x00200
70
71#define HNS3_TX_REG_OFFSET 0x40
72
73#define HNS3_RX_HEAD_SIZE 256
74
75#define HNS3_TX_TIMEOUT (5 * HZ)
76#define HNS3_RING_NAME_LEN 16
77#define HNS3_BUFFER_SIZE_2048 2048
78#define HNS3_RING_MAX_PENDING 32768
79
80#define HNS3_BD_SIZE_512_TYPE 0
81#define HNS3_BD_SIZE_1024_TYPE 1
82#define HNS3_BD_SIZE_2048_TYPE 2
83#define HNS3_BD_SIZE_4096_TYPE 3
84
85#define HNS3_RX_FLAG_VLAN_PRESENT 0x1
86#define HNS3_RX_FLAG_L3ID_IPV4 0x0
87#define HNS3_RX_FLAG_L3ID_IPV6 0x1
88#define HNS3_RX_FLAG_L4ID_UDP 0x0
89#define HNS3_RX_FLAG_L4ID_TCP 0x1
90
91#define HNS3_RXD_DMAC_S 0
92#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S)
93#define HNS3_RXD_VLAN_S 2
94#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S)
95#define HNS3_RXD_L3ID_S 4
96#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S)
97#define HNS3_RXD_L4ID_S 8
98#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S)
99#define HNS3_RXD_FRAG_B 12
100#define HNS3_RXD_L2E_B 16
101#define HNS3_RXD_L3E_B 17
102#define HNS3_RXD_L4E_B 18
103#define HNS3_RXD_TRUNCAT_B 19
104#define HNS3_RXD_HOI_B 20
105#define HNS3_RXD_DOI_B 21
106#define HNS3_RXD_OL3E_B 22
107#define HNS3_RXD_OL4E_B 23
108
109#define HNS3_RXD_ODMAC_S 0
110#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
111#define HNS3_RXD_OVLAN_S 2
112#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S)
113#define HNS3_RXD_OL3ID_S 4
114#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S)
115#define HNS3_RXD_OL4ID_S 8
116#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S)
117#define HNS3_RXD_FBHI_S 12
118#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S)
119#define HNS3_RXD_FBLI_S 14
120#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S)
121
122#define HNS3_RXD_BDTYPE_S 0
123#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S)
124#define HNS3_RXD_VLD_B 4
125#define HNS3_RXD_UDP0_B 5
126#define HNS3_RXD_EXTEND_B 7
127#define HNS3_RXD_FE_B 8
128#define HNS3_RXD_LUM_B 9
129#define HNS3_RXD_CRCP_B 10
130#define HNS3_RXD_L3L4P_B 11
131#define HNS3_RXD_TSIND_S 12
132#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
133#define HNS3_RXD_LKBK_B 15
134#define HNS3_RXD_HDL_S 16
135#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S)
136#define HNS3_RXD_HSIND_B 31
137
138#define HNS3_TXD_L3T_S 0
139#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
140#define HNS3_TXD_L4T_S 2
141#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S)
142#define HNS3_TXD_L3CS_B 4
143#define HNS3_TXD_L4CS_B 5
144#define HNS3_TXD_VLAN_B 6
145#define HNS3_TXD_TSO_B 7
146
147#define HNS3_TXD_L2LEN_S 8
148#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S)
149#define HNS3_TXD_L3LEN_S 16
150#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S)
151#define HNS3_TXD_L4LEN_S 24
152#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S)
153
154#define HNS3_TXD_OL3T_S 0
155#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S)
156#define HNS3_TXD_OVLAN_B 2
157#define HNS3_TXD_MACSEC_B 3
158#define HNS3_TXD_TUNTYPE_S 4
159#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S)
160
161#define HNS3_TXD_BDTYPE_S 0
162#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S)
163#define HNS3_TXD_FE_B 4
164#define HNS3_TXD_SC_S 5
165#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S)
166#define HNS3_TXD_EXTEND_B 7
167#define HNS3_TXD_VLD_B 8
168#define HNS3_TXD_RI_B 9
169#define HNS3_TXD_RA_B 10
170#define HNS3_TXD_TSYN_B 11
171#define HNS3_TXD_DECTTL_S 12
172#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S)
173
174#define HNS3_TXD_MSS_S 0
175#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
176
177#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
178#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)
179
180#define HNS3_VECTOR_NOT_INITED 0
181#define HNS3_VECTOR_INITED 1
182
183#define HNS3_MAX_BD_SIZE 65535
184#define HNS3_MAX_BD_PER_FRAG 8
185#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
186
187#define HNS3_VECTOR_GL0_OFFSET 0x100
188#define HNS3_VECTOR_GL1_OFFSET 0x200
189#define HNS3_VECTOR_GL2_OFFSET 0x300
190#define HNS3_VECTOR_RL_OFFSET 0x900
191#define HNS3_VECTOR_RL_EN_B 6
192
193enum hns3_pkt_l3t_type {
194 HNS3_L3T_NONE,
195 HNS3_L3T_IPV6,
196 HNS3_L3T_IPV4,
197 HNS3_L3T_RESERVED
198};
199
200enum hns3_pkt_l4t_type {
201 HNS3_L4T_UNKNOWN,
202 HNS3_L4T_TCP,
203 HNS3_L4T_UDP,
204 HNS3_L4T_SCTP
205};
206
207enum hns3_pkt_ol3t_type {
208 HNS3_OL3T_NONE,
209 HNS3_OL3T_IPV6,
210 HNS3_OL3T_IPV4_NO_CSUM,
211 HNS3_OL3T_IPV4_CSUM
212};
213
214enum hns3_pkt_tun_type {
215 HNS3_TUN_NONE,
216 HNS3_TUN_MAC_IN_UDP,
217 HNS3_TUN_NVGRE,
218 HNS3_TUN_OTHER
219};
220
221/* hardware spec ring buffer format */
222struct __packed hns3_desc {
223 __le64 addr;
224 union {
225 struct {
226 __le16 vlan_tag;
227 __le16 send_size;
228 union {
229 __le32 type_cs_vlan_tso_len;
230 struct {
231 __u8 type_cs_vlan_tso;
232 __u8 l2_len;
233 __u8 l3_len;
234 __u8 l4_len;
235 };
236 };
237 __le16 outer_vlan_tag;
238 __le16 tv;
239
240 union {
241 __le32 ol_type_vlan_len_msec;
242 struct {
243 __u8 ol_type_vlan_msec;
244 __u8 ol2_len;
245 __u8 ol3_len;
246 __u8 ol4_len;
247 };
248 };
249
250 __le32 paylen;
251 __le16 bdtp_fe_sc_vld_ra_ri;
252 __le16 mss;
253 } tx;
254
255 struct {
256 __le32 l234_info;
257 __le16 pkt_len;
258 __le16 size;
259
260 __le32 rss_hash;
261 __le16 fd_id;
262 __le16 vlan_tag;
263
264 union {
265 __le32 ol_info;
266 struct {
267 __le16 o_dm_vlan_id_fb;
268 __le16 ot_vlan_tag;
269 };
270 };
271
272 __le32 bd_base_info;
273 } rx;
274 };
275};
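/* Both the tx and rx layouts above are 24 bytes, so together with the
 * 8-byte address each hardware descriptor is 32 bytes.
 */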
276
277struct hns3_desc_cb {
278 dma_addr_t dma; /* dma address of this desc */
279 void *buf; /* cpu addr for a desc */
280
 281	/* priv data for the desc, e.g. skb when used with the IP stack */
282 void *priv;
283 u16 page_offset;
284 u16 reuse_flag;
285
286 u16 length; /* length of the buffer */
287
288 /* desc type, used by the ring user to mark the type of the priv data */
289 u16 type;
290};
291
292enum hns3_pkt_l3type {
293 HNS3_L3_TYPE_IPV4,
294 HNS3_L3_TYPE_IPV6,
295 HNS3_L3_TYPE_ARP,
296 HNS3_L3_TYPE_RARP,
297 HNS3_L3_TYPE_IPV4_OPT,
298 HNS3_L3_TYPE_IPV6_EXT,
299 HNS3_L3_TYPE_LLDP,
300 HNS3_L3_TYPE_BPDU,
301 HNS3_L3_TYPE_MAC_PAUSE,
302 HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/
303
304 /* reserved for 0xA~0xB*/
305
306 HNS3_L3_TYPE_CNM = 0xc,
307
308 /* reserved for 0xD~0xE*/
309
310 HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */
311};
312
313enum hns3_pkt_l4type {
314 HNS3_L4_TYPE_UDP,
315 HNS3_L4_TYPE_TCP,
316 HNS3_L4_TYPE_GRE,
317 HNS3_L4_TYPE_SCTP,
318 HNS3_L4_TYPE_IGMP,
319 HNS3_L4_TYPE_ICMP,
320
321 /* reserved for 0x6~0xE */
322
323 HNS3_L4_TYPE_PARSE_FAIL = 0xf /* must be last */
324};
325
326enum hns3_pkt_ol3type {
327 HNS3_OL3_TYPE_IPV4 = 0,
328 HNS3_OL3_TYPE_IPV6,
329 /* reserved for 0x2~0x3 */
330 HNS3_OL3_TYPE_IPV4_OPT = 4,
331 HNS3_OL3_TYPE_IPV6_EXT,
332
333 /* reserved for 0x6~0xE*/
334
335 HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */
336};
337
338enum hns3_pkt_ol4type {
339 HNS3_OL4_TYPE_NO_TUN,
340 HNS3_OL4_TYPE_MAC_IN_UDP,
341 HNS3_OL4_TYPE_NVGRE,
342 HNS3_OL4_TYPE_UNKNOWN
343};
344
345struct ring_stats {
346 u64 io_err_cnt;
347 u64 sw_err_cnt;
348 u64 seg_pkt_cnt;
349 union {
350 struct {
351 u64 tx_pkts;
352 u64 tx_bytes;
353 u64 tx_err_cnt;
354 u64 restart_queue;
355 u64 tx_busy;
356 };
357 struct {
358 u64 rx_pkts;
359 u64 rx_bytes;
360 u64 rx_err_cnt;
361 u64 reuse_pg_cnt;
362 u64 err_pkt_len;
363 u64 non_vld_descs;
364 u64 err_bd_num;
365 u64 l2_err;
366 u64 l3l4_csum_err;
367 };
368 };
369};
370
371struct hns3_enet_ring {
372 u8 __iomem *io_base; /* base io address for the ring */
373 struct hns3_desc *desc; /* dma map address space */
374 struct hns3_desc_cb *desc_cb;
375 struct hns3_enet_ring *next;
376 struct hns3_enet_tqp_vector *tqp_vector;
377 struct hnae3_queue *tqp;
378 char ring_name[HNS3_RING_NAME_LEN];
379 struct device *dev; /* will be used for DMA mapping of descriptors */
380
381 /* statistic */
382 struct ring_stats stats;
383 struct u64_stats_sync syncp;
384
385 dma_addr_t desc_dma_addr;
386 u32 buf_size; /* size for hnae_desc->addr, preset by AE */
387 u16 desc_num; /* total number of desc */
388 u16 max_desc_num_per_pkt;
389 u16 max_raw_data_sz_per_desc;
390 u16 max_pkt_size;
391 int next_to_use; /* idx of next spare desc */
392
 393	/* idx of the latest sent desc; the ring is empty when equal to
394 * next_to_use
395 */
396 int next_to_clean;
397
398 u32 flag; /* ring attribute */
399 int irq_init_flag;
400
401 int numa_node;
402 cpumask_t affinity_mask;
403};
404
405struct hns_queue;
406
407struct hns3_nic_ring_data {
408 struct hns3_enet_ring *ring;
409 struct napi_struct napi;
410 int queue_index;
411 int (*poll_one)(struct hns3_nic_ring_data *, int, void *);
412 void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *);
413 void (*fini_process)(struct hns3_nic_ring_data *);
414};
415
416struct hns3_nic_ops {
417 int (*fill_desc)(struct hns3_enet_ring *ring, void *priv,
418 int size, dma_addr_t dma, int frag_end,
419 enum hns_desc_type type);
420 int (*maybe_stop_tx)(struct sk_buff **out_skb,
421 int *bnum, struct hns3_enet_ring *ring);
422 void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
423};
424
425enum hns3_flow_level_range {
426 HNS3_FLOW_LOW = 0,
427 HNS3_FLOW_MID = 1,
428 HNS3_FLOW_HIGH = 2,
429 HNS3_FLOW_ULTRA = 3,
430};
431
432enum hns3_link_mode_bits {
433 HNS3_LM_FIBRE_BIT = BIT(0),
434 HNS3_LM_AUTONEG_BIT = BIT(1),
435 HNS3_LM_TP_BIT = BIT(2),
436 HNS3_LM_PAUSE_BIT = BIT(3),
437 HNS3_LM_BACKPLANE_BIT = BIT(4),
438 HNS3_LM_10BASET_HALF_BIT = BIT(5),
439 HNS3_LM_10BASET_FULL_BIT = BIT(6),
440 HNS3_LM_100BASET_HALF_BIT = BIT(7),
441 HNS3_LM_100BASET_FULL_BIT = BIT(8),
442 HNS3_LM_1000BASET_FULL_BIT = BIT(9),
443 HNS3_LM_10000BASEKR_FULL_BIT = BIT(10),
444 HNS3_LM_25000BASEKR_FULL_BIT = BIT(11),
445 HNS3_LM_40000BASELR4_FULL_BIT = BIT(12),
446 HNS3_LM_50000BASEKR2_FULL_BIT = BIT(13),
447 HNS3_LM_100000BASEKR4_FULL_BIT = BIT(14),
448 HNS3_LM_COUNT = 15
449};
450
451#define HNS3_INT_GL_50K 0x000A
452#define HNS3_INT_GL_20K 0x0019
453#define HNS3_INT_GL_18K 0x001B
454#define HNS3_INT_GL_8K 0x003E
455
456struct hns3_enet_ring_group {
457 /* array of pointers to rings */
458 struct hns3_enet_ring *ring;
459 u64 total_bytes; /* total bytes processed this group */
460 u64 total_packets; /* total packets processed this group */
461 u16 count;
462 enum hns3_flow_level_range flow_level;
463 u16 int_gl;
464};
465
466struct hns3_enet_tqp_vector {
467 struct hnae3_handle *handle;
468 u8 __iomem *mask_addr;
469 int vector_irq;
470 int irq_init_flag;
471
472 u16 idx; /* index in the TQP vector array per handle. */
473
474 struct napi_struct napi;
475
476 struct hns3_enet_ring_group rx_group;
477 struct hns3_enet_ring_group tx_group;
478
479 u16 num_tqps; /* total number of tqps in TQP vector */
480
481 cpumask_t affinity_mask;
482 char name[HNAE3_INT_NAME_LEN];
483
 484	/* when this reaches 0, adjust the interrupt coalescing parameters */
485 u8 int_adapt_down;
486} ____cacheline_internodealigned_in_smp;
487
488enum hns3_udp_tnl_type {
489 HNS3_UDP_TNL_VXLAN,
490 HNS3_UDP_TNL_GENEVE,
491 HNS3_UDP_TNL_MAX,
492};
493
494struct hns3_udp_tunnel {
495 u16 dst_port;
496 int used;
497};
498
499struct hns3_nic_priv {
500 struct hnae3_handle *ae_handle;
501 u32 enet_ver;
502 u32 port_id;
503 struct net_device *netdev;
504 struct device *dev;
505 struct hns3_nic_ops ops;
506
507 /**
 508	 * the cb for the nic to manage the ring buffer; the first half of the
 509	 * array is for the tx rings and the second half for the rx rings
510 */
511 struct hns3_nic_ring_data *ring_data;
512 struct hns3_enet_tqp_vector *tqp_vector;
513 u16 vector_num;
514
515 /* The most recently read link state */
516 int link;
517 u64 tx_timeout_count;
518
519 unsigned long state;
520
521 struct timer_list service_timer;
522
523 struct work_struct service_task;
524
525 struct notifier_block notifier_block;
526 /* Vxlan/Geneve information */
527 struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX];
528};
529
530union l3_hdr_info {
531 struct iphdr *v4;
532 struct ipv6hdr *v6;
533 unsigned char *hdr;
534};
535
536union l4_hdr_info {
537 struct tcphdr *tcp;
538 struct udphdr *udp;
539 unsigned char *hdr;
540};
541
542/* the distance between [begin, end) in a ring buffer
 543 * note: there is an unused slot between begin and end
544 */
545static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end)
546{
547 return (end - begin + ring->desc_num) % ring->desc_num;
548}
549
550static inline int ring_space(struct hns3_enet_ring *ring)
551{
552 return ring->desc_num -
553 ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
554}
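/* Example: with desc_num = 512, next_to_clean = 10 and next_to_use = 500,
 * ring_dist() returns (500 - 10 + 512) % 512 = 490 in-flight descriptors
 * and ring_space() returns 512 - 490 - 1 = 21 free slots; the "- 1" keeps
 * one slot unused so a full ring can be told apart from an empty one.
 */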
555
556static inline int is_ring_empty(struct hns3_enet_ring *ring)
557{
558 return ring->next_to_use == ring->next_to_clean;
559}
560
561static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
562{
563 u8 __iomem *reg_addr = READ_ONCE(base);
564
565 writel(value, reg_addr + reg);
566}
567
568#define hns3_write_dev(a, reg, value) \
569 hns3_write_reg((a)->io_base, (reg), (value))
570
571#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
572 (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
573
574#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
575
576#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
577 DMA_TO_DEVICE : DMA_FROM_DEVICE)
578
579#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
580
581#define hnae_buf_size(_ring) ((_ring)->buf_size)
582#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
583#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
584
585/* iterator for handling rings in ring group */
586#define hns3_for_each_ring(pos, head) \
587 for (pos = (head).ring; pos; pos = pos->next)
588
589void hns3_ethtool_set_ops(struct net_device *netdev);
590
591int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
592#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
new file mode 100644
index 000000000000..0ad65e47c77e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
@@ -0,0 +1,482 @@
1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/etherdevice.h>
11#include <linux/string.h>
12
13#include "hns3_enet.h"
14
15struct hns3_stats {
16 char stats_string[ETH_GSTRING_LEN];
17 int stats_size;
18 int stats_offset;
19};
20
21/* tqp related stats */
22#define HNS3_TQP_STAT(_string, _member) { \
23 .stats_string = _string, \
24 .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \
 25	.stats_offset = offsetof(struct hns3_enet_ring, stats) +	\
			offsetof(struct ring_stats, _member),	\
 26} \
27
28static const struct hns3_stats hns3_txq_stats[] = {
29 /* Tx per-queue statistics */
30 HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt),
31 HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt),
32 HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt),
33 HNS3_TQP_STAT("tx_pkts", tx_pkts),
34 HNS3_TQP_STAT("tx_bytes", tx_bytes),
35 HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt),
36 HNS3_TQP_STAT("tx_restart_queue", restart_queue),
37 HNS3_TQP_STAT("tx_busy", tx_busy),
38};
39
40#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
41
42static const struct hns3_stats hns3_rxq_stats[] = {
43 /* Rx per-queue statistics */
44 HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt),
45 HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt),
46 HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt),
47 HNS3_TQP_STAT("rx_pkts", rx_pkts),
48 HNS3_TQP_STAT("rx_bytes", rx_bytes),
49 HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt),
50 HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt),
51 HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len),
52 HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs),
53 HNS3_TQP_STAT("rx_err_bd_num", err_bd_num),
54 HNS3_TQP_STAT("rx_l2_err", l2_err),
55 HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err),
56};
57
58#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
59
60#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
61
62struct hns3_link_mode_mapping {
63 u32 hns3_link_mode;
64 u32 ethtool_link_mode;
65};
66
67static const struct hns3_link_mode_mapping hns3_lm_map[] = {
68 {HNS3_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
69 {HNS3_LM_AUTONEG_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
70 {HNS3_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT},
71 {HNS3_LM_PAUSE_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
72 {HNS3_LM_BACKPLANE_BIT, ETHTOOL_LINK_MODE_Backplane_BIT},
73 {HNS3_LM_10BASET_HALF_BIT, ETHTOOL_LINK_MODE_10baseT_Half_BIT},
74 {HNS3_LM_10BASET_FULL_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT},
75 {HNS3_LM_100BASET_HALF_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT},
76 {HNS3_LM_100BASET_FULL_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT},
77 {HNS3_LM_1000BASET_FULL_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
78};
79
80static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd,
81 bool is_advertised)
82{
83 int i;
84
85 for (i = 0; i < ARRAY_SIZE(hns3_lm_map); i++) {
86 if (!(caps & hns3_lm_map[i].hns3_link_mode))
87 continue;
88
89 if (is_advertised) {
90 ethtool_link_ksettings_zero_link_mode(cmd,
91 advertising);
92 __set_bit(hns3_lm_map[i].ethtool_link_mode,
93 cmd->link_modes.advertising);
94 } else {
95 ethtool_link_ksettings_zero_link_mode(cmd,
96 supported);
97 __set_bit(hns3_lm_map[i].ethtool_link_mode,
98 cmd->link_modes.supported);
99 }
100 }
101}
102
103static int hns3_get_sset_count(struct net_device *netdev, int stringset)
104{
105 struct hns3_nic_priv *priv = netdev_priv(netdev);
106 struct hnae3_handle *h = priv->ae_handle;
107 const struct hnae3_ae_ops *ops = h->ae_algo->ops;
108
109 if (!ops->get_sset_count)
110 return -EOPNOTSUPP;
111
112 switch (stringset) {
113 case ETH_SS_STATS:
114 return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) +
115 ops->get_sset_count(h, stringset));
116
117 case ETH_SS_TEST:
118 return ops->get_sset_count(h, stringset);
119 }
120
121 return 0;
122}
123
124static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
125 u32 stat_count, u32 num_tqps)
126{
127#define MAX_PREFIX_SIZE (8 + 4)
128 u32 size_left;
129 u32 i, j;
130 u32 n1;
131
132 for (i = 0; i < num_tqps; i++) {
133 for (j = 0; j < stat_count; j++) {
134 data[ETH_GSTRING_LEN - 1] = '\0';
135
136 /* first, prepend the prefix string */
137 n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i);
138 n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
139 size_left = (ETH_GSTRING_LEN - 1) - n1;
140
141 /* now, concatenate the stats string to it */
142 strncat(data, stats[j].stats_string, size_left);
143 data += ETH_GSTRING_LEN;
144 }
145 }
146
147 return data;
148}
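/* Example: for queue 3 and the "tx_pkts" stat this produces the ethtool
 * string "rcb_q3_tx_pkts", truncated to ETH_GSTRING_LEN - 1 characters
 * if needed.
 */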
149
150static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
151{
152 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
153
154 /* get strings for Tx */
155 data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
156 kinfo->num_tqps);
157
158 /* get strings for Rx */
159 data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
160 kinfo->num_tqps);
161
162 return data;
163}
164
165static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
166{
167 struct hns3_nic_priv *priv = netdev_priv(netdev);
168 struct hnae3_handle *h = priv->ae_handle;
169 const struct hnae3_ae_ops *ops = h->ae_algo->ops;
170 char *buff = (char *)data;
171
172 if (!ops->get_strings)
173 return;
174
175 switch (stringset) {
176 case ETH_SS_STATS:
177 buff = hns3_get_strings_tqps(h, buff);
178 h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff);
179 break;
180 case ETH_SS_TEST:
181 ops->get_strings(h, stringset, data);
182 break;
183 }
184}
185
186static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
187{
188 struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv;
189 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
190 struct hns3_enet_ring *ring;
191 u8 *stat;
192 u32 i, j;
193
194 /* get stats for Tx */
195 for (i = 0; i < kinfo->num_tqps; i++) {
196 ring = nic_priv->ring_data[i].ring;
197 for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
198 stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
199 *data++ = *(u64 *)stat;
200 }
201 }
202
203 /* get stats for Rx */
204 for (i = 0; i < kinfo->num_tqps; i++) {
205 ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
206 for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
207 stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
208 *data++ = *(u64 *)stat;
209 }
210 }
211
212 return data;
213}
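The Tx and Rx loops above differ only in the ring index base and the descriptor table used, so they could share a small helper built on the same offsetof() read. A possible sketch, reusing the struct hns3_stats layout already referenced by hns3_update_strings():

/* Possible refactor of the duplicated loops above: copy one ring's
 * counters, as described by a stats table, into the output buffer. */
static u64 *example_copy_ring_stats(struct hns3_enet_ring *ring,
				    const struct hns3_stats *stats,
				    u32 count, u64 *data)
{
	u32 j;

	for (j = 0; j < count; j++)
		*data++ = *(u64 *)((u8 *)ring + stats[j].stats_offset);

	return data;
}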
214
215/** hns3_get_stats - get detailed statistics.
216 * @netdev: net device
217 * @stats: statistics info.
218 * @data: statistics data.
219 */
220void hns3_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
221 u64 *data)
222{
223 struct hns3_nic_priv *priv = netdev_priv(netdev);
224 struct hnae3_handle *h = priv->ae_handle;
225 u64 *p = data;
226
227 if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
228 netdev_err(netdev, "could not get any statistics\n");
229 return;
230 }
231
232 h->ae_algo->ops->update_stats(h, &netdev->stats);
233
234 /* get per-queue stats */
235 p = hns3_get_stats_tqps(h, p);
236
237 /* get MAC & other misc hardware stats */
238 h->ae_algo->ops->get_stats(h, p);
239}
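The u64 buffer filled here must line up one-to-one with the strings produced by hns3_get_strings(): per-queue Tx counters first, then per-queue Rx counters, then the handle-level stats appended by the AE layer. A hypothetical walker over the per-queue portion, shown only to make that layout explicit:

/* Hypothetical consumer of the layout assumed above:
 * [TXQ0 .. TXQn-1 counters][RXQ0 .. RXQn-1 counters][AE/MAC counters] */
static void example_dump_queue_stats(const u64 *data, u32 num_tqps)
{
	u32 q, s, idx = 0;

	for (q = 0; q < num_tqps; q++)
		for (s = 0; s < HNS3_TXQ_STATS_COUNT; s++)
			pr_debug("txq%u stat %u = %llu\n", q, s,
				 (unsigned long long)data[idx++]);

	for (q = 0; q < num_tqps; q++)
		for (s = 0; s < HNS3_RXQ_STATS_COUNT; s++)
			pr_debug("rxq%u stat %u = %llu\n", q, s,
				 (unsigned long long)data[idx++]);
}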
240
241static void hns3_get_drvinfo(struct net_device *netdev,
242 struct ethtool_drvinfo *drvinfo)
243{
244 struct hns3_nic_priv *priv = netdev_priv(netdev);
245 struct hnae3_handle *h = priv->ae_handle;
246
247 strncpy(drvinfo->version, hns3_driver_version,
248 sizeof(drvinfo->version));
249 drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
250
251 strncpy(drvinfo->driver, h->pdev->driver->name,
252 sizeof(drvinfo->driver));
253 drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
254
255 strncpy(drvinfo->bus_info, pci_name(h->pdev),
256 sizeof(drvinfo->bus_info));
257 drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
258
259 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
260 priv->ae_handle->ae_algo->ops->get_fw_version(h));
261}
262
263static u32 hns3_get_link(struct net_device *netdev)
264{
265 struct hns3_nic_priv *priv = netdev_priv(netdev);
266 struct hnae3_handle *h;
267
268 h = priv->ae_handle;
269
270 if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status)
271 return h->ae_algo->ops->get_status(h);
272 else
273 return 0;
274}
275
276static void hns3_get_ringparam(struct net_device *netdev,
277 struct ethtool_ringparam *param)
278{
279 struct hns3_nic_priv *priv = netdev_priv(netdev);
280 int queue_num = priv->ae_handle->kinfo.num_tqps;
281
282 param->tx_max_pending = HNS3_RING_MAX_PENDING;
283 param->rx_max_pending = HNS3_RING_MAX_PENDING;
284
285 param->tx_pending = priv->ring_data[0].ring->desc_num;
286 param->rx_pending = priv->ring_data[queue_num].ring->desc_num;
287}
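This function and hns3_get_stats_tqps() rely on the same ring_data layout: the Tx ring for queue q sits at index q and the Rx ring at index q + num_tqps. Two illustrative accessors make that contract explicit:

/* Illustrative accessors for the ring_data layout used above:
 * Tx ring of queue q at index q, Rx ring at index q + num_tqps. */
static inline struct hns3_enet_ring *example_tx_ring(struct hns3_nic_priv *priv,
						     int q)
{
	return priv->ring_data[q].ring;
}

static inline struct hns3_enet_ring *example_rx_ring(struct hns3_nic_priv *priv,
						     int q)
{
	int num_tqps = priv->ae_handle->kinfo.num_tqps;

	return priv->ring_data[q + num_tqps].ring;
}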
288
289static void hns3_get_pauseparam(struct net_device *netdev,
290 struct ethtool_pauseparam *param)
291{
292 struct hns3_nic_priv *priv = netdev_priv(netdev);
293 struct hnae3_handle *h = priv->ae_handle;
294
295 if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam)
296 h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
297 &param->rx_pause, &param->tx_pause);
298}
299
300static int hns3_get_link_ksettings(struct net_device *netdev,
301 struct ethtool_link_ksettings *cmd)
302{
303 struct hns3_nic_priv *priv = netdev_priv(netdev);
304 struct hnae3_handle *h = priv->ae_handle;
305 u32 supported_caps;
306 u32 advertised_caps;
307 u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
308 u8 link_stat;
309 u8 auto_neg;
310 u8 duplex;
311 u32 speed;
312
313 if (!h->ae_algo || !h->ae_algo->ops)
314 return -EOPNOTSUPP;
315
316 /* 1. auto_neg, speed and duplex: from the AE layer's autoneg result */
317 if (h->ae_algo->ops->get_ksettings_an_result) {
318 h->ae_algo->ops->get_ksettings_an_result(h, &auto_neg,
319 &speed, &duplex);
320 cmd->base.autoneg = auto_neg;
321 cmd->base.speed = speed;
322 cmd->base.duplex = duplex;
323
324 link_stat = hns3_get_link(netdev);
325 if (!link_stat) {
326 cmd->base.speed = (u32)SPEED_UNKNOWN;
327 cmd->base.duplex = DUPLEX_UNKNOWN;
328 }
329 }
330
331 /* 2. media_type is read from the BIOS parameter block */
332 if (h->ae_algo->ops->get_media_type)
333 h->ae_algo->ops->get_media_type(h, &media_type);
334
335 switch (media_type) {
336 case HNAE3_MEDIA_TYPE_FIBER:
337 cmd->base.port = PORT_FIBRE;
338 supported_caps = HNS3_LM_FIBRE_BIT | HNS3_LM_AUTONEG_BIT |
339 HNS3_LM_PAUSE_BIT | HNS3_LM_1000BASET_FULL_BIT;
340
341 advertised_caps = supported_caps;
342 break;
343 case HNAE3_MEDIA_TYPE_COPPER:
344 cmd->base.port = PORT_TP;
345 supported_caps = HNS3_LM_TP_BIT | HNS3_LM_AUTONEG_BIT |
346 HNS3_LM_PAUSE_BIT | HNS3_LM_1000BASET_FULL_BIT |
347 HNS3_LM_100BASET_FULL_BIT | HNS3_LM_100BASET_HALF_BIT |
348 HNS3_LM_10BASET_FULL_BIT | HNS3_LM_10BASET_HALF_BIT;
349 advertised_caps = supported_caps;
350 break;
351 case HNAE3_MEDIA_TYPE_BACKPLANE:
352 cmd->base.port = PORT_NONE;
353 supported_caps = HNS3_LM_BACKPLANE_BIT | HNS3_LM_PAUSE_BIT |
354 HNS3_LM_AUTONEG_BIT | HNS3_LM_1000BASET_FULL_BIT |
355 HNS3_LM_100BASET_FULL_BIT | HNS3_LM_100BASET_HALF_BIT |
356 HNS3_LM_10BASET_FULL_BIT | HNS3_LM_10BASET_HALF_BIT;
357
358 advertised_caps = supported_caps;
359 break;
360 case HNAE3_MEDIA_TYPE_UNKNOWN:
361 default:
362 cmd->base.port = PORT_OTHER;
363 supported_caps = 0;
364 advertised_caps = 0;
365 break;
366 }
367
368 /* now, map driver link modes to ethtool link modes */
369 hns3_driv_to_eth_caps(supported_caps, cmd, false);
370 hns3_driv_to_eth_caps(advertised_caps, cmd, true);
371
372 /* 3. mdix_ctrl and mdix are read from the PHY registers */
373 if (h->ae_algo->ops->get_mdix_mode)
374 h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
375 &cmd->base.eth_tp_mdix);
376 /* 4.mdio_support */
377 cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
378
379 return 0;
380}
381
382static u32 hns3_get_rss_key_size(struct net_device *netdev)
383{
384 struct hns3_nic_priv *priv = netdev_priv(netdev);
385 struct hnae3_handle *h = priv->ae_handle;
386
387 if (!h->ae_algo || !h->ae_algo->ops ||
388 !h->ae_algo->ops->get_rss_key_size)
389 return 0; /* u32 return value cannot carry an errno */
390
391 return h->ae_algo->ops->get_rss_key_size(h);
392}
393
394static u32 hns3_get_rss_indir_size(struct net_device *netdev)
395{
396 struct hns3_nic_priv *priv = netdev_priv(netdev);
397 struct hnae3_handle *h = priv->ae_handle;
398
399 if (!h->ae_algo || !h->ae_algo->ops ||
400 !h->ae_algo->ops->get_rss_indir_size)
403 return 0; /* u32 return value cannot carry an errno */
402
403 return h->ae_algo->ops->get_rss_indir_size(h);
404}
405
406static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
407 u8 *hfunc)
408{
409 struct hns3_nic_priv *priv = netdev_priv(netdev);
410 struct hnae3_handle *h = priv->ae_handle;
411
412 if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss)
413 return -EOPNOTSUPP;
414
415 return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
416}
417
418static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
419 const u8 *key, const u8 hfunc)
420{
421 struct hns3_nic_priv *priv = netdev_priv(netdev);
422 struct hnae3_handle *h = priv->ae_handle;
423
424 if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
425 return -EOPNOTSUPP;
426
427 /* currently we only support Toeplitz hash */
428 if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) {
429 netdev_err(netdev,
430 "hash func not supported (only Toeplitz hash)\n");
431 return -EOPNOTSUPP;
432 }
433 if (!indir) {
434 netdev_err(netdev,
435 "set rss failed for indir is empty\n");
436 return -EOPNOTSUPP;
437 }
438
439 return h->ae_algo->ops->set_rss(h, indir, key, hfunc);
440}
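hns3_set_rss() only validates the request and delegates the actual table write to the bound AE algorithm. A skeleton of what a backing .set_rss callback could look like; the hardware programming steps are left as comments because they are device-specific:

/* Skeleton of a hypothetical AE-layer .set_rss implementation. */
static int example_ae_set_rss(struct hnae3_handle *handle, const u32 *indir,
			      const u8 *key, const u8 hfunc)
{
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (key) {
		/* ... write the Toeplitz hash key to hardware ... */
	}

	/* ... program the RSS indirection table from indir[] ... */

	return 0;
}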
441
442static int hns3_get_rxnfc(struct net_device *netdev,
443 struct ethtool_rxnfc *cmd,
444 u32 *rule_locs)
445{
446 struct hns3_nic_priv *priv = netdev_priv(netdev);
447 struct hnae3_handle *h = priv->ae_handle;
448
449 if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_tc_size)
450 return -EOPNOTSUPP;
451
452 switch (cmd->cmd) {
453 case ETHTOOL_GRXRINGS:
454 cmd->data = h->ae_algo->ops->get_tc_size(h);
455 break;
456 default:
457 return -EOPNOTSUPP;
458 }
459
460 return 0;
461}
462
463static const struct ethtool_ops hns3_ethtool_ops = {
464 .get_drvinfo = hns3_get_drvinfo,
465 .get_link = hns3_get_link,
466 .get_ringparam = hns3_get_ringparam,
467 .get_pauseparam = hns3_get_pauseparam,
468 .get_strings = hns3_get_strings,
469 .get_ethtool_stats = hns3_get_stats,
470 .get_sset_count = hns3_get_sset_count,
471 .get_rxnfc = hns3_get_rxnfc,
472 .get_rxfh_key_size = hns3_get_rss_key_size,
473 .get_rxfh_indir_size = hns3_get_rss_indir_size,
474 .get_rxfh = hns3_get_rss,
475 .set_rxfh = hns3_set_rss,
476 .get_link_ksettings = hns3_get_link_ksettings,
477};
478
479void hns3_ethtool_set_ops(struct net_device *netdev)
480{
481 netdev->ethtool_ops = &hns3_ethtool_ops;
482}
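hns3_ethtool_set_ops() is the only symbol this file exposes; the ENET client is expected to call it while setting up the net_device, before registration. A minimal sketch of such a call site, with the surrounding init function assumed for illustration:

/* Hypothetical excerpt from the ENET client's netdev initialisation,
 * showing where the ethtool ops would typically be attached. */
static int example_nic_init_netdev(struct net_device *netdev)
{
	/* ... set netdev_ops, features, MTU limits, etc. ... */

	hns3_ethtool_set_ops(netdev);

	return register_netdev(netdev);
}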