author:    dingtianhong <dingtianhong@huawei.com>  2015-01-14 01:34:14 -0500
committer: David S. Miller <davem@davemloft.net>   2015-01-14 01:52:45 -0500
commit:    a41ea46a9a128abe5210381ba7902f5208096d53
tree:      01607c0a2a80c3bd78527fc895418006d8f1ed05
parent:    4a841ee928f430e466cf8e7ea8ad08eb13b1377c
net: hisilicon: new hip04 ethernet driver
Support the Hisilicon hip04 ethernet driver, including the 100M / 1000M controller.
The controller has no tx done interrupt, so transmitted buffers are reclaimed in the poll routine.
v13: Fix function parameter alignment and a checkpatch warning.
v12: Following Alex's suggestion, modify the changelog and add MODULE_DEVICE_TABLE
for the hip04 ethernet driver.
v11: Add ethtool support for getting and setting the tx coalesce parameters. xmit_more
is not supported by this patch, but it should work for hip04 and will be
added later after further performance testing.
Here are some performance test results from ping and iperf (with tx_coalesce_frames/usecs added);
both throughput and latency improve with tx_coalesce_frames/usecs.
- Before:
$ ping 192.168.1.1 ...
=== 192.168.1.1 ping statistics ===
24 packets transmitted, 24 received, 0% packet loss, time 22999ms
rtt min/avg/max/mdev = 0.180/0.202/0.403/0.043 ms
$ iperf -c 192.168.1.1 ...
[ ID] Interval Transfer Bandwidth
[ 3] 0.0- 1.0 sec 115 MBytes 945 Mbits/sec
- After:
$ ping 192.168.1.1 ...
=== 192.168.1.1 ping statistics ===
24 packets transmitted, 24 received, 0% packet loss, time 22999ms
rtt min/avg/max/mdev = 0.178/0.190/0.380/0.041 ms
$ iperf -c 192.168.1.1 ...
[ ID] Interval Transfer Bandwidth
[ 3] 0.0- 1.0 sec 115 MBytes 965 Mbits/sec
v10: Following David Miller and Arnd Bergmann's suggestions, make the following
modifications to the v9 version:
- drop the workqueue
- batch cleanup based on tx_coalesce_frames/usecs for better throughput
- use a reasonable default tx timeout (200us, could be shortened
based on measurements) with a range timer
- fix napi poll function return value
- use a lockless queue for cleanup
Signed-off-by: Zhangfei Gao <zhangfei.gao@linaro.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/ethernet/hisilicon/Makefile    |   2 +-
 drivers/net/ethernet/hisilicon/hip04_eth.c | 969 ++++++++++++++++++++++++++++
 2 files changed, 970 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 40115a7e2ed5..6c14540a4dc5 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -3,4 +3,4 @@
 #
 
 obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
-obj-$(CONFIG_HIP04_ETH) += hip04_mdio.o
+obj-$(CONFIG_HIP04_ETH) += hip04_mdio.o hip04_eth.o
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
new file mode 100644
index 000000000000..525214ef5984
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -0,0 +1,969 @@

/* Copyright (c) 2014 Linaro Ltd.
 * Copyright (c) 2014 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#define PPE_CFG_RX_ADDR			0x100
#define PPE_CFG_POOL_GRP		0x300
#define PPE_CFG_RX_BUF_SIZE		0x400
#define PPE_CFG_RX_FIFO_SIZE		0x500
#define PPE_CURR_BUF_CNT		0xa200

#define GE_DUPLEX_TYPE			0x08
#define GE_MAX_FRM_SIZE_REG		0x3c
#define GE_PORT_MODE			0x40
#define GE_PORT_EN			0x44
#define GE_SHORT_RUNTS_THR_REG		0x50
#define GE_TX_LOCAL_PAGE_REG		0x5c
#define GE_TRANSMIT_CONTROL_REG		0x60
#define GE_CF_CRC_STRIP_REG		0x1b0
#define GE_MODE_CHANGE_REG		0x1b4
#define GE_RECV_CONTROL_REG		0x1e0
#define GE_STATION_MAC_ADDRESS		0x210
#define PPE_CFG_CPU_ADD_ADDR		0x580
#define PPE_CFG_MAX_FRAME_LEN_REG	0x408
#define PPE_CFG_BUS_CTRL_REG		0x424
#define PPE_CFG_RX_CTRL_REG		0x428
#define PPE_CFG_RX_PKT_MODE_REG		0x438
#define PPE_CFG_QOS_VMID_GEN		0x500
#define PPE_CFG_RX_PKT_INT		0x538
#define PPE_INTEN			0x600
#define PPE_INTSTS			0x608
#define PPE_RINT			0x604
#define PPE_CFG_STS_MODE		0x700
#define PPE_HIS_RX_PKT_CNT		0x804

/* REG_INTERRUPT */
#define RCV_INT				BIT(10)
#define RCV_NOBUF			BIT(8)
#define RCV_DROP			BIT(7)
#define TX_DROP				BIT(6)
#define DEF_INT_ERR			(RCV_NOBUF | RCV_DROP | TX_DROP)
#define DEF_INT_MASK			(RCV_INT | DEF_INT_ERR)

/* TX descriptor config */
#define TX_FREE_MEM			BIT(0)
#define TX_READ_ALLOC_L3		BIT(1)
#define TX_FINISH_CACHE_INV		BIT(2)
#define TX_CLEAR_WB			BIT(4)
#define TX_L3_CHECKSUM			BIT(5)
#define TX_LOOP_BACK			BIT(11)

/* RX error */
#define RX_PKT_DROP			BIT(0)
#define RX_L2_ERR			BIT(1)
#define RX_PKT_ERR			(RX_PKT_DROP | RX_L2_ERR)

#define SGMII_SPEED_1000		0x08
#define SGMII_SPEED_100			0x07
#define SGMII_SPEED_10			0x06
#define MII_SPEED_100			0x01
#define MII_SPEED_10			0x00

#define GE_DUPLEX_FULL			BIT(0)
#define GE_DUPLEX_HALF			0x00
#define GE_MODE_CHANGE_EN		BIT(0)

#define GE_TX_AUTO_NEG			BIT(5)
#define GE_TX_ADD_CRC			BIT(6)
#define GE_TX_SHORT_PAD_THROUGH		BIT(7)

#define GE_RX_STRIP_CRC			BIT(0)
#define GE_RX_STRIP_PAD			BIT(3)
#define GE_RX_PAD_EN			BIT(4)

#define GE_AUTO_NEG_CTL			BIT(0)

#define GE_RX_INT_THRESHOLD		BIT(6)
#define GE_RX_TIMEOUT			0x04

#define GE_RX_PORT_EN			BIT(1)
#define GE_TX_PORT_EN			BIT(2)

#define PPE_CFG_STS_RX_PKT_CNT_RC	BIT(12)

#define PPE_CFG_RX_PKT_ALIGN		BIT(18)
#define PPE_CFG_QOS_VMID_MODE		BIT(14)
#define PPE_CFG_QOS_VMID_GRP_SHIFT	8

#define PPE_CFG_RX_FIFO_FSFU		BIT(11)
#define PPE_CFG_RX_DEPTH_SHIFT		16
#define PPE_CFG_RX_START_SHIFT		0
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT	11

#define PPE_CFG_BUS_LOCAL_REL		BIT(14)
#define PPE_CFG_BUS_BIG_ENDIEN		BIT(0)

#define RX_DESC_NUM			128
#define TX_DESC_NUM			256
#define TX_NEXT(N)			(((N) + 1) & (TX_DESC_NUM-1))
#define RX_NEXT(N)			(((N) + 1) & (RX_DESC_NUM-1))

#define GMAC_PPE_RX_PKT_MAX_LEN		379
#define GMAC_MAX_PKT_LEN		1516
#define GMAC_MIN_PKT_LEN		31
#define RX_BUF_SIZE			1600
#define RESET_TIMEOUT			1000
#define TX_TIMEOUT			(6 * HZ)

#define DRV_NAME			"hip04-ether"
#define DRV_VERSION			"v1.0"

#define HIP04_MAX_TX_COALESCE_USECS	200
#define HIP04_MIN_TX_COALESCE_USECS	100
#define HIP04_MAX_TX_COALESCE_FRAMES	200
#define HIP04_MIN_TX_COALESCE_FRAMES	100

struct tx_desc {
	u32 send_addr;
	u32 send_size;
	u32 next_addr;
	u32 cfg;
	u32 wb_addr;
} __aligned(64);

struct rx_desc {
	u16 reserved_16;
	u16 pkt_len;
	u32 reserve1[3];
	u32 pkt_err;
	u32 reserve2[4];
};

struct hip04_priv {
	void __iomem *base;
	int phy_mode;
	int chan;
	unsigned int port;
	unsigned int speed;
	unsigned int duplex;
	unsigned int reg_inten;

	struct napi_struct napi;
	struct net_device *ndev;

	struct tx_desc *tx_desc;
	dma_addr_t tx_desc_dma;
	struct sk_buff *tx_skb[TX_DESC_NUM];
	dma_addr_t tx_phys[TX_DESC_NUM];
	unsigned int tx_head;

	int tx_coalesce_frames;
	int tx_coalesce_usecs;
	struct hrtimer tx_coalesce_timer;

	unsigned char *rx_buf[RX_DESC_NUM];
	dma_addr_t rx_phys[RX_DESC_NUM];
	unsigned int rx_head;
	unsigned int rx_buf_size;

	struct device_node *phy_node;
	struct phy_device *phy;
	struct regmap *map;
	struct work_struct tx_timeout_task;

	/* written only by tx cleanup */
	unsigned int tx_tail ____cacheline_aligned_in_smp;
};

static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
	return (head - tail) % (TX_DESC_NUM - 1);
}

static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	priv->speed = speed;
	priv->duplex = duplex;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_SGMII:
		if (speed == SPEED_1000)
			val = SGMII_SPEED_1000;
		else if (speed == SPEED_100)
			val = SGMII_SPEED_100;
		else
			val = SGMII_SPEED_10;
		break;
	case PHY_INTERFACE_MODE_MII:
		if (speed == SPEED_100)
			val = MII_SPEED_100;
		else
			val = MII_SPEED_10;
		break;
	default:
		netdev_warn(ndev, "not supported mode\n");
		val = MII_SPEED_10;
		break;
	}
	writel_relaxed(val, priv->base + GE_PORT_MODE);

	val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
	writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);

	val = GE_MODE_CHANGE_EN;
	writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
}

static void hip04_reset_ppe(struct hip04_priv *priv)
{
	u32 val, tmp, timeout = 0;

	do {
		regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
		regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
		if (timeout++ > RESET_TIMEOUT)
			break;
	} while (val & 0xfff);
}

static void hip04_config_fifo(struct hip04_priv *priv)
{
	u32 val;

	val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
	val |= PPE_CFG_STS_RX_PKT_CNT_RC;
	writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);

	val = BIT(priv->port);
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);

	val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
	val |= PPE_CFG_QOS_VMID_MODE;
	writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);

	val = RX_BUF_SIZE;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);

	val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
	val |= PPE_CFG_RX_FIFO_FSFU;
	val |= priv->chan << PPE_CFG_RX_START_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);

	val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);

	val = PPE_CFG_RX_PKT_ALIGN;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);

	val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
	writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);

	val = GMAC_PPE_RX_PKT_MAX_LEN;
	writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);

	val = GMAC_MAX_PKT_LEN;
	writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);

	val = GMAC_MIN_PKT_LEN;
	writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);

	val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
	val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
	writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);

	val = GE_RX_STRIP_CRC;
	writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);

	val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
	val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
	writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);

	val = GE_AUTO_NEG_CTL;
	writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
}

static void hip04_mac_enable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* enable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
	writel_relaxed(val, priv->base + GE_PORT_EN);

	/* clear rx int */
	val = RCV_INT;
	writel_relaxed(val, priv->base + PPE_RINT);

	/* config recv int */
	val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);

	/* enable interrupt */
	priv->reg_inten = DEF_INT_MASK;
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
}

static void hip04_mac_disable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* disable int */
	priv->reg_inten &= ~(DEF_INT_MASK);
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);

	/* disable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
	writel_relaxed(val, priv->base + GE_PORT_EN);
}

static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
}

static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
}

static u32 hip04_recv_cnt(struct hip04_priv *priv)
{
	return readl(priv->base + PPE_HIS_RX_PKT_CNT);
}

static void hip04_update_mac_address(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
		       priv->base + GE_STATION_MAC_ADDRESS);
	writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
			(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
		       priv->base + GE_STATION_MAC_ADDRESS + 4);
}

static int hip04_set_mac_address(struct net_device *ndev, void *addr)
{
	eth_mac_addr(ndev, addr);
	hip04_update_mac_address(ndev);
	return 0;
}

static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	unsigned tx_tail = priv->tx_tail;
	struct tx_desc *desc;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int count;

	smp_rmb();
	count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
	if (count == 0)
		goto out;

	while (count) {
		desc = &priv->tx_desc[tx_tail];
		if (desc->send_addr != 0) {
			if (force)
				desc->send_addr = 0;
			else
				break;
		}

		if (priv->tx_phys[tx_tail]) {
			dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
					 priv->tx_skb[tx_tail]->len,
					 DMA_TO_DEVICE);
			priv->tx_phys[tx_tail] = 0;
		}
		pkts_compl++;
		bytes_compl += priv->tx_skb[tx_tail]->len;
		dev_kfree_skb(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;
		tx_tail = TX_NEXT(tx_tail);
		count--;
	}

	priv->tx_tail = tx_tail;
	smp_wmb(); /* Ensure tx_tail visible to xmit */

out:
	if (pkts_compl || bytes_compl)
		netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
		netif_wake_queue(ndev);

	return count;
}

static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int tx_head = priv->tx_head, count;
	struct tx_desc *desc = &priv->tx_desc[tx_head];
	dma_addr_t phys;

	smp_rmb();
	count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
	if (count == (TX_DESC_NUM - 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, phys)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	priv->tx_skb[tx_head] = skb;
	priv->tx_phys[tx_head] = phys;
	desc->send_addr = cpu_to_be32(phys);
	desc->send_size = cpu_to_be32(skb->len);
	desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
	phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
	desc->wb_addr = cpu_to_be32(phys);
	skb_tx_timestamp(skb);

	hip04_set_xmit_desc(priv, phys);
	priv->tx_head = TX_NEXT(tx_head);
	count++;
	netdev_sent_queue(ndev, skb->len);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	/* Ensure tx_head update visible to tx reclaim */
	smp_wmb();

	/* queue is getting full, better start cleaning up now */
	if (count >= priv->tx_coalesce_frames) {
		if (napi_schedule_prep(&priv->napi)) {
			/* disable rx interrupt and timer */
			priv->reg_inten &= ~(RCV_INT);
			writel_relaxed(DEF_INT_MASK & ~RCV_INT,
				       priv->base + PPE_INTEN);
			hrtimer_cancel(&priv->tx_coalesce_timer);
			__napi_schedule(&priv->napi);
		}
	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
		/* cleanup not pending yet, start a new timer */
		hrtimer_start_expires(&priv->tx_coalesce_timer,
				      HRTIMER_MODE_REL);
	}

	return NETDEV_TX_OK;
}

static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	unsigned int cnt = hip04_recv_cnt(priv);
	struct rx_desc *desc;
	struct sk_buff *skb;
	unsigned char *buf;
	bool last = false;
	dma_addr_t phys;
	int rx = 0;
	int tx_remaining;
	u16 len;
	u32 err;

	while (cnt && !last) {
		buf = priv->rx_buf[priv->rx_head];
		skb = build_skb(buf, priv->rx_buf_size);
		if (unlikely(!skb))
			net_dbg_ratelimited("build_skb failed\n");

		dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
				 RX_BUF_SIZE, DMA_FROM_DEVICE);
		priv->rx_phys[priv->rx_head] = 0;

		desc = (struct rx_desc *)skb->data;
		len = be16_to_cpu(desc->pkt_len);
		err = be32_to_cpu(desc->pkt_err);

		if (0 == len) {
			dev_kfree_skb_any(skb);
			last = true;
		} else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
			dev_kfree_skb_any(skb);
			stats->rx_dropped++;
			stats->rx_errors++;
		} else {
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi, skb);
			stats->rx_packets++;
			stats->rx_bytes += len;
			rx++;
		}

		buf = netdev_alloc_frag(priv->rx_buf_size);
		if (!buf)
			goto done;
		phys = dma_map_single(&ndev->dev, buf,
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, phys))
			goto done;
		priv->rx_buf[priv->rx_head] = buf;
		priv->rx_phys[priv->rx_head] = phys;
		hip04_set_recv_desc(priv, phys);

		priv->rx_head = RX_NEXT(priv->rx_head);
		if (rx >= budget)
			goto done;

		if (--cnt == 0)
			cnt = hip04_recv_cnt(priv);
	}

	if (!(priv->reg_inten & RCV_INT)) {
		/* enable rx interrupt */
		priv->reg_inten |= RCV_INT;
		writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
	}
	napi_complete(napi);
done:
	/* clean up tx descriptors and start a new timer if necessary */
	tx_remaining = hip04_tx_reclaim(ndev, false);
	if (rx < budget && tx_remaining)
		hrtimer_start_expires(&priv->tx_coalesce_timer, HRTIMER_MODE_REL);

	return rx;
}

static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

	if (!ists)
		return IRQ_NONE;

	writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

	if (unlikely(ists & DEF_INT_ERR)) {
		if (ists & (RCV_NOBUF | RCV_DROP))
			stats->rx_errors++;
		stats->rx_dropped++;
		netdev_err(ndev, "rx drop\n");
		if (ists & TX_DROP) {
			stats->tx_dropped++;
			netdev_err(ndev, "tx drop\n");
		}
	}

	if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		hrtimer_cancel(&priv->tx_coalesce_timer);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
	struct hip04_priv *priv;

	priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);

	if (napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		__napi_schedule(&priv->napi);
	}

	return HRTIMER_NORESTART;
}

static void hip04_adjust_link(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct phy_device *phy = priv->phy;

	if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
		hip04_config_port(ndev, phy->speed, phy->duplex);
		phy_print_status(phy);
	}
}

static int hip04_mac_open(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->rx_head = 0;
	priv->tx_head = 0;
	priv->tx_tail = 0;
	hip04_reset_ppe(priv);

	for (i = 0; i < RX_DESC_NUM; i++) {
		dma_addr_t phys;

		phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, phys))
			return -EIO;

		priv->rx_phys[i] = phys;
		hip04_set_recv_desc(priv, phys);
	}

	if (priv->phy)
		phy_start(priv->phy);

	netdev_reset_queue(ndev);
	netif_start_queue(ndev);
	hip04_mac_enable(ndev);
	napi_enable(&priv->napi);

	return 0;
}

static int hip04_mac_stop(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);
	hip04_mac_disable(ndev);
	hip04_tx_reclaim(ndev, true);
	hip04_reset_ppe(priv);

	if (priv->phy)
		phy_stop(priv->phy);

	for (i = 0; i < RX_DESC_NUM; i++) {
		if (priv->rx_phys[i]) {
			dma_unmap_single(&ndev->dev, priv->rx_phys[i],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);
			priv->rx_phys[i] = 0;
		}
	}

	return 0;
}

static void hip04_timeout(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}

static void hip04_tx_timeout_task(struct work_struct *work)
{
	struct hip04_priv *priv;

	priv = container_of(work, struct hip04_priv, tx_timeout_task);
	hip04_mac_stop(priv->ndev);
	hip04_mac_open(priv->ndev);
}

static struct net_device_stats *hip04_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}

static int hip04_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;

	return 0;
}

static int hip04_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	/* Check not supported parameters */
	if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
	    (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
	    (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
	    (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
	    (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
	    (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
	    (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
	    (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_irq) ||
	    (ec->stats_block_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
		return -EOPNOTSUPP;

	if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
	     ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
	    (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
	     ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
		return -EINVAL;

	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;

	return 0;
}

static void hip04_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}

static struct ethtool_ops hip04_ethtool_ops = {
	.get_coalesce		= hip04_get_coalesce,
	.set_coalesce		= hip04_set_coalesce,
	.get_drvinfo		= hip04_get_drvinfo,
};

static struct net_device_ops hip04_netdev_ops = {
	.ndo_open		= hip04_mac_open,
	.ndo_stop		= hip04_mac_stop,
	.ndo_get_stats		= hip04_get_stats,
	.ndo_start_xmit		= hip04_mac_start_xmit,
	.ndo_set_mac_address	= hip04_set_mac_address,
	.ndo_tx_timeout		= hip04_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->tx_desc = dma_alloc_coherent(d,
					   TX_DESC_NUM * sizeof(struct tx_desc),
					   &priv->tx_desc_dma, GFP_KERNEL);
	if (!priv->tx_desc)
		return -ENOMEM;

	priv->rx_buf_size = RX_BUF_SIZE +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	for (i = 0; i < RX_DESC_NUM; i++) {
		priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
		if (!priv->rx_buf[i])
			return -ENOMEM;
	}

	return 0;
}

static void hip04_free_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_DESC_NUM; i++)
		if (priv->rx_buf[i])
			put_page(virt_to_head_page(priv->rx_buf[i]));

	for (i = 0; i < TX_DESC_NUM; i++)
		if (priv->tx_skb[i])
			dev_kfree_skb_any(priv->tx_skb[i]);

	dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
			  priv->tx_desc, priv->tx_desc_dma);
}

static int hip04_mac_probe(struct platform_device *pdev)
{
	struct device *d = &pdev->dev;
	struct device_node *node = d->of_node;
	struct of_phandle_args arg;
	struct net_device *ndev;
	struct hip04_priv *priv;
	struct resource *res;
	unsigned int irq;
	ktime_t txtime;
	int ret;

	ndev = alloc_etherdev(sizeof(struct hip04_priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	platform_set_drvdata(pdev, ndev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(d, res);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}

	ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
	if (ret < 0) {
		dev_warn(d, "no port-handle\n");
		goto init_fail;
	}

	priv->port = arg.args[0];
	priv->chan = arg.args[1] * RX_DESC_NUM;

	hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	/* BQL will try to keep the TX queue as short as possible, but it can't
	 * be faster than tx_coalesce_usecs, so we need a fast timeout here,
	 * but also long enough to gather up enough frames to ensure we don't
	 * get more interrupts than necessary.
	 * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
	 */
	priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
	priv->tx_coalesce_usecs = 200;
	/* allow timer to fire after half the time at the earliest */
	txtime = ktime_set(0, priv->tx_coalesce_usecs * NSEC_PER_USEC / 2);
	hrtimer_set_expires_range(&priv->tx_coalesce_timer, txtime, txtime);
	priv->tx_coalesce_timer.function = tx_done;

	priv->map = syscon_node_to_regmap(arg.np);
	if (IS_ERR(priv->map)) {
		dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
		ret = PTR_ERR(priv->map);
		goto init_fail;
	}

	priv->phy_mode = of_get_phy_mode(node);
	if (priv->phy_mode < 0) {
		dev_warn(d, "not find phy-mode\n");
		ret = -EINVAL;
		goto init_fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		ret = -EINVAL;
		goto init_fail;
	}

	ret = devm_request_irq(d, irq, hip04_mac_interrupt,
			       0, pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
	if (priv->phy_node) {
		priv->phy = of_phy_connect(ndev, priv->phy_node,
					   &hip04_adjust_link,
					   0, priv->phy_mode);
		if (!priv->phy) {
			ret = -EPROBE_DEFER;
			goto init_fail;
		}
	}

	INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);

	ether_setup(ndev);
	ndev->netdev_ops = &hip04_netdev_ops;
	ndev->ethtool_ops = &hip04_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->irq = irq;
	netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	hip04_reset_ppe(priv);
	if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
		hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);

	hip04_config_fifo(priv);
	random_ether_addr(ndev->dev_addr);
	hip04_update_mac_address(ndev);

	ret = hip04_alloc_ring(ndev, d);
	if (ret) {
		netdev_err(ndev, "alloc ring fail\n");
		goto alloc_fail;
	}

	ret = register_netdev(ndev);
	if (ret) {
		free_netdev(ndev);
		goto alloc_fail;
	}

	return 0;

alloc_fail:
	hip04_free_ring(ndev, d);
init_fail:
	of_node_put(priv->phy_node);
	free_netdev(ndev);
	return ret;
}

static int hip04_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hip04_priv *priv = netdev_priv(ndev);
	struct device *d = &pdev->dev;

	if (priv->phy)
		phy_disconnect(priv->phy);

	hip04_free_ring(ndev, d);
	unregister_netdev(ndev);
	free_irq(ndev->irq, ndev);
	of_node_put(priv->phy_node);
	cancel_work_sync(&priv->tx_timeout_task);
	free_netdev(ndev);

	return 0;
}

static const struct of_device_id hip04_mac_match[] = {
	{ .compatible = "hisilicon,hip04-mac" },
	{ }
};

MODULE_DEVICE_TABLE(of, hip04_mac_match);

static struct platform_driver hip04_mac_driver = {
	.probe	= hip04_mac_probe,
	.remove	= hip04_remove,
	.driver	= {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
		.of_match_table	= hip04_mac_match,
	},
};
module_platform_driver(hip04_mac_driver);

MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");