diff options

author    | Florian Fainelli <f.fainelli@gmail.com> | 2014-02-13 19:08:47 -0500
committer | David S. Miller <davem@davemloft.net>   | 2014-02-14 00:27:58 -0500
commit    | 1c1008c793fa46703a2fee469f4235e1c7984333 (patch)
tree      | c876555a732e25cebd66a8b4aa1078305d7eb6cf /drivers/net/ethernet/broadcom/genet
parent    | b4af9a559cd971d08cbb58d81d932d8bd1787ade (diff)
net: bcmgenet: add main driver file
This patch adds the BCMGENET main driver file which supports the
following:
- GENET hardware from V1 to V4
- support for reading the UniMAC MIB counters statistics
- support for the 5 transmit queues
- support for RX/TX checksum offload and SG
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/broadcom/genet')
-rw-r--r-- | drivers/net/ethernet/broadcom/genet/bcmgenet.c | 2595
1 file changed, 2595 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
new file mode 100644
index 000000000000..0ebc29769510
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -0,0 +1,2595 @@
1 | /* | ||
2 | * Broadcom GENET (Gigabit Ethernet) controller driver | ||
3 | * | ||
4 | * Copyright (c) 2014 Broadcom Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
18 | */ | ||
19 | |||
20 | #define pr_fmt(fmt) "bcmgenet: " fmt | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/fcntl.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/string.h> | ||
29 | #include <linux/if_ether.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/errno.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/platform_device.h> | ||
34 | #include <linux/dma-mapping.h> | ||
35 | #include <linux/pm.h> | ||
36 | #include <linux/clk.h> | ||
37 | #include <linux/version.h> | ||
38 | #include <linux/of.h> | ||
39 | #include <linux/of_address.h> | ||
40 | #include <linux/of_irq.h> | ||
41 | #include <linux/of_net.h> | ||
42 | #include <linux/of_platform.h> | ||
43 | #include <net/arp.h> | ||
44 | |||
45 | #include <linux/mii.h> | ||
46 | #include <linux/ethtool.h> | ||
47 | #include <linux/netdevice.h> | ||
48 | #include <linux/inetdevice.h> | ||
49 | #include <linux/etherdevice.h> | ||
50 | #include <linux/skbuff.h> | ||
51 | #include <linux/in.h> | ||
52 | #include <linux/ip.h> | ||
53 | #include <linux/ipv6.h> | ||
54 | #include <linux/phy.h> | ||
55 | |||
56 | #include <asm/unaligned.h> | ||
57 | |||
58 | #include "bcmgenet.h" | ||
59 | |||
60 | /* Maximum number of hardware queues, downsized if needed */ | ||
61 | #define GENET_MAX_MQ_CNT 4 | ||
62 | |||
63 | /* Default highest priority queue for multi queue support */ | ||
64 | #define GENET_Q0_PRIORITY 0 | ||
65 | |||
66 | #define GENET_DEFAULT_BD_CNT \ | ||
67 | (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt) | ||
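/* Illustrative example (values are hypothetical, not from this patch):
 * with TOTAL_DESC = 256 (see the "skip 256 descriptors" note below),
 * tx_queues = 4 and bds_cnt = 32, the default ring would keep
 * 256 - 4 * 32 = 128 descriptors.
 */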
68 | |||
69 | #define RX_BUF_LENGTH 2048 | ||
70 | #define SKB_ALIGNMENT 32 | ||
71 | |||
72 | /* Tx/Rx DMA register offset, skip 256 descriptors */ | ||
73 | #define WORDS_PER_BD(p) (p->hw_params->words_per_bd) | ||
74 | #define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32)) | ||
75 | |||
76 | #define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \ | ||
77 | TOTAL_DESC * DMA_DESC_SIZE) | ||
78 | |||
79 | #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ | ||
80 | TOTAL_DESC * DMA_DESC_SIZE) | ||
81 | |||
82 | static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, | ||
83 | void __iomem *d, u32 value) | ||
84 | { | ||
85 | __raw_writel(value, d + DMA_DESC_LENGTH_STATUS); | ||
86 | } | ||
87 | |||
88 | static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, | ||
89 | void __iomem *d) | ||
90 | { | ||
91 | return __raw_readl(d + DMA_DESC_LENGTH_STATUS); | ||
92 | } | ||
93 | |||
94 | static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, | ||
95 | void __iomem *d, | ||
96 | dma_addr_t addr) | ||
97 | { | ||
98 | __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); | ||
99 | |||
100 | /* Register writes to GISB bus can take a couple hundred nanoseconds | ||
101 | * and are done for each packet; save these expensive writes unless | ||
102 | * the platform is explicitly configured for 64-bit/LPAE. | ||
103 | */ | ||
104 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | ||
105 | if (priv->hw_params->flags & GENET_HAS_40BITS) | ||
106 | __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); | ||
107 | #endif | ||
108 | } | ||
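/* Illustration of the split above: for a 40-bit DMA address such as
 * 0x12_3456_789a, lower_32_bits() stores 0x3456789a in
 * DMA_DESC_ADDRESS_LO and upper_32_bits() stores 0x12 in
 * DMA_DESC_ADDRESS_HI; on 32-bit-only platforms just the LO write
 * happens.
 */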
109 | |||
110 | /* Combined address + length/status setter */ | ||
111 | static inline void dmadesc_set(struct bcmgenet_priv *priv, | ||
112 | void __iomem *d, dma_addr_t addr, u32 val) | ||
113 | { | ||
114 | dmadesc_set_length_status(priv, d, val); | ||
115 | dmadesc_set_addr(priv, d, addr); | ||
116 | } | ||
117 | |||
118 | static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, | ||
119 | void __iomem *d) | ||
120 | { | ||
121 | dma_addr_t addr; | ||
122 | |||
123 | addr = __raw_readl(d + DMA_DESC_ADDRESS_LO); | ||
124 | |||
125 | /* Register writes to GISB bus can take a couple hundred nanoseconds | ||
126 | * and are done for each packet; save these expensive writes unless | ||
127 | * the platform is explicitly configured for 64-bit/LPAE. | ||
128 | */ | ||
129 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | ||
130 | if (priv->hw_params->flags & GENET_HAS_40BITS) | ||
131 | addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32; | ||
132 | #endif | ||
133 | return addr; | ||
134 | } | ||
135 | |||
136 | #define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x" | ||
137 | |||
138 | #define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ | ||
139 | NETIF_MSG_LINK) | ||
140 | |||
141 | static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) | ||
142 | { | ||
143 | if (GENET_IS_V1(priv)) | ||
144 | return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); | ||
145 | else | ||
146 | return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); | ||
147 | } | ||
148 | |||
149 | static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) | ||
150 | { | ||
151 | if (GENET_IS_V1(priv)) | ||
152 | bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); | ||
153 | else | ||
154 | bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); | ||
155 | } | ||
156 | |||
157 | /* These macros are defined to deal with register map change | ||
158 | * between GENET1.1 and GENET2. Only those currently being used | ||
159 | * by the driver are defined. | ||
160 | */ | ||
161 | static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) | ||
162 | { | ||
163 | if (GENET_IS_V1(priv)) | ||
164 | return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); | ||
165 | else | ||
166 | return __raw_readl(priv->base + | ||
167 | priv->hw_params->tbuf_offset + TBUF_CTRL); | ||
168 | } | ||
169 | |||
170 | static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) | ||
171 | { | ||
172 | if (GENET_IS_V1(priv)) | ||
173 | bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); | ||
174 | else | ||
175 | __raw_writel(val, priv->base + | ||
176 | priv->hw_params->tbuf_offset + TBUF_CTRL); | ||
177 | } | ||
178 | |||
179 | static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) | ||
180 | { | ||
181 | if (GENET_IS_V1(priv)) | ||
182 | return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); | ||
183 | else | ||
184 | return __raw_readl(priv->base + | ||
185 | priv->hw_params->tbuf_offset + TBUF_BP_MC); | ||
186 | } | ||
187 | |||
188 | static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) | ||
189 | { | ||
190 | if (GENET_IS_V1(priv)) | ||
191 | bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); | ||
192 | else | ||
193 | __raw_writel(val, priv->base + | ||
194 | priv->hw_params->tbuf_offset + TBUF_BP_MC); | ||
195 | } | ||
196 | |||
197 | /* RX/TX DMA register accessors */ | ||
198 | enum dma_reg { | ||
199 | DMA_RING_CFG = 0, | ||
200 | DMA_CTRL, | ||
201 | DMA_STATUS, | ||
202 | DMA_SCB_BURST_SIZE, | ||
203 | DMA_ARB_CTRL, | ||
204 | DMA_PRIORITY, | ||
205 | DMA_RING_PRIORITY, | ||
206 | }; | ||
207 | |||
208 | static const u8 bcmgenet_dma_regs_v3plus[] = { | ||
209 | [DMA_RING_CFG] = 0x00, | ||
210 | [DMA_CTRL] = 0x04, | ||
211 | [DMA_STATUS] = 0x08, | ||
212 | [DMA_SCB_BURST_SIZE] = 0x0C, | ||
213 | [DMA_ARB_CTRL] = 0x2C, | ||
214 | [DMA_PRIORITY] = 0x30, | ||
215 | [DMA_RING_PRIORITY] = 0x38, | ||
216 | }; | ||
217 | |||
218 | static const u8 bcmgenet_dma_regs_v2[] = { | ||
219 | [DMA_RING_CFG] = 0x00, | ||
220 | [DMA_CTRL] = 0x04, | ||
221 | [DMA_STATUS] = 0x08, | ||
222 | [DMA_SCB_BURST_SIZE] = 0x0C, | ||
223 | [DMA_ARB_CTRL] = 0x30, | ||
224 | [DMA_PRIORITY] = 0x34, | ||
225 | [DMA_RING_PRIORITY] = 0x3C, | ||
226 | }; | ||
227 | |||
228 | static const u8 bcmgenet_dma_regs_v1[] = { | ||
229 | [DMA_CTRL] = 0x00, | ||
230 | [DMA_STATUS] = 0x04, | ||
231 | [DMA_SCB_BURST_SIZE] = 0x0C, | ||
232 | [DMA_ARB_CTRL] = 0x30, | ||
233 | [DMA_PRIORITY] = 0x34, | ||
234 | [DMA_RING_PRIORITY] = 0x3C, | ||
235 | }; | ||
236 | |||
237 | /* Set at runtime once bcmgenet version is known */ | ||
238 | static const u8 *bcmgenet_dma_regs; | ||
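/* Hypothetical sketch (the selection code is not part of this excerpt):
 * probe code would pick the table once the hardware version is known:
 *
 *	if (GENET_IS_V1(priv))
 *		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
 *	else if (GENET_IS_V2(priv))
 *		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
 *	else
 *		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
 */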
239 | |||
240 | static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) | ||
241 | { | ||
242 | return netdev_priv(dev_get_drvdata(dev)); | ||
243 | } | ||
244 | |||
245 | static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, | ||
246 | enum dma_reg r) | ||
247 | { | ||
248 | return __raw_readl(priv->base + GENET_TDMA_REG_OFF + | ||
249 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | ||
250 | } | ||
251 | |||
252 | static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, | ||
253 | u32 val, enum dma_reg r) | ||
254 | { | ||
255 | __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + | ||
256 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | ||
257 | } | ||
258 | |||
259 | static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, | ||
260 | enum dma_reg r) | ||
261 | { | ||
262 | return __raw_readl(priv->base + GENET_RDMA_REG_OFF + | ||
263 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | ||
264 | } | ||
265 | |||
266 | static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, | ||
267 | u32 val, enum dma_reg r) | ||
268 | { | ||
269 | __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + | ||
270 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | ||
271 | } | ||
272 | |||
273 | /* RDMA/TDMA ring registers and accessors | ||
274 | * we merge the common fields and just prefix with T/D the registers | ||
275 | * whose meaning differs depending on the direction | ||
276 | */ | ||
277 | enum dma_ring_reg { | ||
278 | TDMA_READ_PTR = 0, | ||
279 | RDMA_WRITE_PTR = TDMA_READ_PTR, | ||
280 | TDMA_READ_PTR_HI, | ||
281 | RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, | ||
282 | TDMA_CONS_INDEX, | ||
283 | RDMA_PROD_INDEX = TDMA_CONS_INDEX, | ||
284 | TDMA_PROD_INDEX, | ||
285 | RDMA_CONS_INDEX = TDMA_PROD_INDEX, | ||
286 | DMA_RING_BUF_SIZE, | ||
287 | DMA_START_ADDR, | ||
288 | DMA_START_ADDR_HI, | ||
289 | DMA_END_ADDR, | ||
290 | DMA_END_ADDR_HI, | ||
291 | DMA_MBUF_DONE_THRESH, | ||
292 | TDMA_FLOW_PERIOD, | ||
293 | RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, | ||
294 | TDMA_WRITE_PTR, | ||
295 | RDMA_READ_PTR = TDMA_WRITE_PTR, | ||
296 | TDMA_WRITE_PTR_HI, | ||
297 | RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI | ||
298 | }; | ||
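/* Illustration of the aliasing above: a read of RDMA_PROD_INDEX goes
 * through the same table slot as TDMA_CONS_INDEX, i.e. ring offset
 * 0x08 on GENET v4 and 0x04 on v1/v2/v3 (see the tables below).
 */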
299 | |||
300 | /* GENET v4 supports 40-bit pointer addressing. | ||
301 | * For obvious reasons the LO and HI word parts | ||
302 | * are contiguous, but this shifts the offsets | ||
303 | * of the other registers. | ||
304 | */ | ||
305 | static const u8 genet_dma_ring_regs_v4[] = { | ||
306 | [TDMA_READ_PTR] = 0x00, | ||
307 | [TDMA_READ_PTR_HI] = 0x04, | ||
308 | [TDMA_CONS_INDEX] = 0x08, | ||
309 | [TDMA_PROD_INDEX] = 0x0C, | ||
310 | [DMA_RING_BUF_SIZE] = 0x10, | ||
311 | [DMA_START_ADDR] = 0x14, | ||
312 | [DMA_START_ADDR_HI] = 0x18, | ||
313 | [DMA_END_ADDR] = 0x1C, | ||
314 | [DMA_END_ADDR_HI] = 0x20, | ||
315 | [DMA_MBUF_DONE_THRESH] = 0x24, | ||
316 | [TDMA_FLOW_PERIOD] = 0x28, | ||
317 | [TDMA_WRITE_PTR] = 0x2C, | ||
318 | [TDMA_WRITE_PTR_HI] = 0x30, | ||
319 | }; | ||
320 | |||
321 | static const u8 genet_dma_ring_regs_v123[] = { | ||
322 | [TDMA_READ_PTR] = 0x00, | ||
323 | [TDMA_CONS_INDEX] = 0x04, | ||
324 | [TDMA_PROD_INDEX] = 0x08, | ||
325 | [DMA_RING_BUF_SIZE] = 0x0C, | ||
326 | [DMA_START_ADDR] = 0x10, | ||
327 | [DMA_END_ADDR] = 0x14, | ||
328 | [DMA_MBUF_DONE_THRESH] = 0x18, | ||
329 | [TDMA_FLOW_PERIOD] = 0x1C, | ||
330 | [TDMA_WRITE_PTR] = 0x20, | ||
331 | }; | ||
332 | |||
333 | /* Set at runtime once GENET version is known */ | ||
334 | static const u8 *genet_dma_ring_regs; | ||
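/* Hypothetical sketch, assuming a GENET_IS_V4() helper alongside the
 * GENET_IS_V1()/GENET_IS_V2() helpers used above:
 *
 *	genet_dma_ring_regs = GENET_IS_V4(priv) ?
 *		genet_dma_ring_regs_v4 : genet_dma_ring_regs_v123;
 */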
335 | |||
336 | static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, | ||
337 | unsigned int ring, | ||
338 | enum dma_ring_reg r) | ||
339 | { | ||
340 | return __raw_readl(priv->base + GENET_TDMA_REG_OFF + | ||
341 | (DMA_RING_SIZE * ring) + | ||
342 | genet_dma_ring_regs[r]); | ||
343 | } | ||
344 | |||
345 | static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, | ||
346 | unsigned int ring, | ||
347 | u32 val, | ||
348 | enum dma_ring_reg r) | ||
349 | { | ||
350 | __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + | ||
351 | (DMA_RING_SIZE * ring) + | ||
352 | genet_dma_ring_regs[r]); | ||
353 | } | ||
354 | |||
355 | static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, | ||
356 | unsigned int ring, | ||
357 | enum dma_ring_reg r) | ||
358 | { | ||
359 | return __raw_readl(priv->base + GENET_RDMA_REG_OFF + | ||
360 | (DMA_RING_SIZE * ring) + | ||
361 | genet_dma_ring_regs[r]); | ||
362 | } | ||
363 | |||
364 | static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, | ||
365 | unsigned int ring, | ||
366 | u32 val, | ||
367 | enum dma_ring_reg r) | ||
368 | { | ||
369 | __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + | ||
370 | (DMA_RING_SIZE * ring) + | ||
371 | genet_dma_ring_regs[r]); | ||
372 | } | ||
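/* Worked example (assuming DMA_RING_SIZE is 0x40 in bcmgenet.h): on
 * GENET v4, bcmgenet_tdma_ring_readl(priv, 1, TDMA_PROD_INDEX)
 * resolves to base + tdma_offset + 256 * DMA_DESC_SIZE + 1 * 0x40 + 0x0c,
 * i.e. the per-ring registers start right after the 256 descriptors.
 */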
373 | |||
374 | static int bcmgenet_get_settings(struct net_device *dev, | ||
375 | struct ethtool_cmd *cmd) | ||
376 | { | ||
377 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
378 | |||
379 | if (!netif_running(dev)) | ||
380 | return -EINVAL; | ||
381 | |||
382 | if (!priv->phydev) | ||
383 | return -ENODEV; | ||
384 | |||
385 | return phy_ethtool_gset(priv->phydev, cmd); | ||
386 | } | ||
387 | |||
388 | static int bcmgenet_set_settings(struct net_device *dev, | ||
389 | struct ethtool_cmd *cmd) | ||
390 | { | ||
391 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
392 | |||
393 | if (!netif_running(dev)) | ||
394 | return -EINVAL; | ||
395 | |||
396 | if (!priv->phydev) | ||
397 | return -ENODEV; | ||
398 | |||
399 | return phy_ethtool_sset(priv->phydev, cmd); | ||
400 | } | ||
401 | |||
402 | static int bcmgenet_set_rx_csum(struct net_device *dev, | ||
403 | netdev_features_t wanted) | ||
404 | { | ||
405 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
406 | u32 rbuf_chk_ctrl; | ||
407 | bool rx_csum_en; | ||
408 | |||
409 | rx_csum_en = !!(wanted & NETIF_F_RXCSUM); | ||
410 | |||
411 | rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); | ||
412 | |||
413 | /* enable rx checksumming */ | ||
414 | if (rx_csum_en) | ||
415 | rbuf_chk_ctrl |= RBUF_RXCHK_EN; | ||
416 | else | ||
417 | rbuf_chk_ctrl &= ~RBUF_RXCHK_EN; | ||
418 | priv->desc_rxchk_en = rx_csum_en; | ||
419 | bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL); | ||
420 | |||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | static int bcmgenet_set_tx_csum(struct net_device *dev, | ||
425 | netdev_features_t wanted) | ||
426 | { | ||
427 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
428 | bool desc_64b_en; | ||
429 | u32 tbuf_ctrl, rbuf_ctrl; | ||
430 | |||
431 | tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv); | ||
432 | rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL); | ||
433 | |||
434 | desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); | ||
435 | |||
436 | /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */ | ||
437 | if (desc_64b_en) { | ||
438 | tbuf_ctrl |= RBUF_64B_EN; | ||
439 | rbuf_ctrl |= RBUF_64B_EN; | ||
440 | } else { | ||
441 | tbuf_ctrl &= ~RBUF_64B_EN; | ||
442 | rbuf_ctrl &= ~RBUF_64B_EN; | ||
443 | } | ||
444 | priv->desc_64b_en = desc_64b_en; | ||
445 | |||
446 | bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl); | ||
447 | bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL); | ||
448 | |||
449 | return 0; | ||
450 | } | ||
451 | |||
452 | static int bcmgenet_set_features(struct net_device *dev, | ||
453 | netdev_features_t features) | ||
454 | { | ||
455 | netdev_features_t changed = features ^ dev->features; | ||
456 | netdev_features_t wanted = dev->wanted_features; | ||
457 | int ret = 0; | ||
458 | |||
459 | if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) | ||
460 | ret = bcmgenet_set_tx_csum(dev, wanted); | ||
461 | if (changed & (NETIF_F_RXCSUM)) | ||
462 | ret = bcmgenet_set_rx_csum(dev, wanted); | ||
463 | |||
464 | return ret; | ||
465 | } | ||
466 | |||
467 | static u32 bcmgenet_get_msglevel(struct net_device *dev) | ||
468 | { | ||
469 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
470 | |||
471 | return priv->msg_enable; | ||
472 | } | ||
473 | |||
474 | static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) | ||
475 | { | ||
476 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
477 | |||
478 | priv->msg_enable = level; | ||
479 | } | ||
480 | |||
481 | /* standard ethtool support functions. */ | ||
482 | enum bcmgenet_stat_type { | ||
483 | BCMGENET_STAT_NETDEV = -1, | ||
484 | BCMGENET_STAT_MIB_RX, | ||
485 | BCMGENET_STAT_MIB_TX, | ||
486 | BCMGENET_STAT_RUNT, | ||
487 | BCMGENET_STAT_MISC, | ||
488 | }; | ||
489 | |||
490 | struct bcmgenet_stats { | ||
491 | char stat_string[ETH_GSTRING_LEN]; | ||
492 | int stat_sizeof; | ||
493 | int stat_offset; | ||
494 | enum bcmgenet_stat_type type; | ||
495 | /* reg offset from UMAC base for misc counters */ | ||
496 | u16 reg_offset; | ||
497 | }; | ||
498 | |||
499 | #define STAT_NETDEV(m) { \ | ||
500 | .stat_string = __stringify(m), \ | ||
501 | .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ | ||
502 | .stat_offset = offsetof(struct net_device_stats, m), \ | ||
503 | .type = BCMGENET_STAT_NETDEV, \ | ||
504 | } | ||
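/* For illustration, STAT_NETDEV(rx_packets) expands to roughly:
 *
 *	{ .stat_string = "rx_packets",
 *	  .stat_sizeof = sizeof(((struct net_device_stats *)0)->rx_packets),
 *	  .stat_offset = offsetof(struct net_device_stats, rx_packets),
 *	  .type = BCMGENET_STAT_NETDEV }
 *
 * so bcmgenet_get_ethtool_stats() can fetch any counter generically
 * from (char *)&dev->stats + stat_offset.
 */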
505 | |||
506 | #define STAT_GENET_MIB(str, m, _type) { \ | ||
507 | .stat_string = str, \ | ||
508 | .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ | ||
509 | .stat_offset = offsetof(struct bcmgenet_priv, m), \ | ||
510 | .type = _type, \ | ||
511 | } | ||
512 | |||
513 | #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) | ||
514 | #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) | ||
515 | #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) | ||
516 | |||
517 | #define STAT_GENET_MISC(str, m, offset) { \ | ||
518 | .stat_string = str, \ | ||
519 | .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ | ||
520 | .stat_offset = offsetof(struct bcmgenet_priv, m), \ | ||
521 | .type = BCMGENET_STAT_MISC, \ | ||
522 | .reg_offset = offset, \ | ||
523 | } | ||
524 | |||
525 | |||
526 | /* There is a 0xC gap between the end of RX and beginning of TX stats and then | ||
527 | * between the end of TX stats and the beginning of the RX RUNT stats | ||
528 | */ | ||
529 | #define BCMGENET_STAT_OFFSET 0xc | ||
530 | |||
531 | /* Hardware counters must be kept in sync because the order/offset | ||
532 | * is important here (order in structure declaration = order in hardware) | ||
533 | */ | ||
534 | static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { | ||
535 | /* general stats */ | ||
536 | STAT_NETDEV(rx_packets), | ||
537 | STAT_NETDEV(tx_packets), | ||
538 | STAT_NETDEV(rx_bytes), | ||
539 | STAT_NETDEV(tx_bytes), | ||
540 | STAT_NETDEV(rx_errors), | ||
541 | STAT_NETDEV(tx_errors), | ||
542 | STAT_NETDEV(rx_dropped), | ||
543 | STAT_NETDEV(tx_dropped), | ||
544 | STAT_NETDEV(multicast), | ||
545 | /* UniMAC RSV counters */ | ||
546 | STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), | ||
547 | STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), | ||
548 | STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), | ||
549 | STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), | ||
550 | STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), | ||
551 | STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), | ||
552 | STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), | ||
553 | STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), | ||
554 | STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), | ||
555 | STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), | ||
556 | STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), | ||
557 | STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), | ||
558 | STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), | ||
559 | STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), | ||
560 | STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), | ||
561 | STAT_GENET_MIB_RX("rx_control", mib.rx.cf), | ||
562 | STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), | ||
563 | STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), | ||
564 | STAT_GENET_MIB_RX("rx_align", mib.rx.aln), | ||
565 | STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), | ||
566 | STAT_GENET_MIB_RX("rx_code", mib.rx.cde), | ||
567 | STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), | ||
568 | STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), | ||
569 | STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), | ||
570 | STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), | ||
571 | STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), | ||
572 | STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), | ||
573 | STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), | ||
574 | STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), | ||
575 | /* UniMAC TSV counters */ | ||
576 | STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), | ||
577 | STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), | ||
578 | STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), | ||
579 | STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), | ||
580 | STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), | ||
581 | STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), | ||
582 | STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), | ||
583 | STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), | ||
584 | STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), | ||
585 | STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), | ||
586 | STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), | ||
587 | STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), | ||
588 | STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), | ||
589 | STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), | ||
590 | STAT_GENET_MIB_TX("tx_control", mib.tx.cf), | ||
591 | STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), | ||
592 | STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), | ||
593 | STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), | ||
594 | STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), | ||
595 | STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), | ||
596 | STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), | ||
597 | STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), | ||
598 | STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), | ||
599 | STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), | ||
600 | STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), | ||
601 | STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), | ||
602 | STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), | ||
603 | STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), | ||
604 | STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), | ||
605 | /* UniMAC RUNT counters */ | ||
606 | STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), | ||
607 | STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), | ||
608 | STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), | ||
609 | STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), | ||
610 | /* Misc UniMAC counters */ | ||
611 | STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, | ||
612 | UMAC_RBUF_OVFL_CNT), | ||
613 | STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), | ||
614 | STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), | ||
615 | }; | ||
616 | |||
617 | #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) | ||
618 | |||
619 | static void bcmgenet_get_drvinfo(struct net_device *dev, | ||
620 | struct ethtool_drvinfo *info) | ||
621 | { | ||
622 | strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); | ||
623 | strlcpy(info->version, "v2.0", sizeof(info->version)); | ||
624 | info->n_stats = BCMGENET_STATS_LEN; | ||
625 | |||
626 | } | ||
627 | |||
628 | static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) | ||
629 | { | ||
630 | switch (string_set) { | ||
631 | case ETH_SS_STATS: | ||
632 | return BCMGENET_STATS_LEN; | ||
633 | default: | ||
634 | return -EOPNOTSUPP; | ||
635 | } | ||
636 | } | ||
637 | |||
638 | static void bcmgenet_get_strings(struct net_device *dev, | ||
639 | u32 stringset, u8 *data) | ||
640 | { | ||
641 | int i; | ||
642 | |||
643 | switch (stringset) { | ||
644 | case ETH_SS_STATS: | ||
645 | for (i = 0; i < BCMGENET_STATS_LEN; i++) { | ||
646 | memcpy(data + i * ETH_GSTRING_LEN, | ||
647 | bcmgenet_gstrings_stats[i].stat_string, | ||
648 | ETH_GSTRING_LEN); | ||
649 | } | ||
650 | break; | ||
651 | } | ||
652 | } | ||
653 | |||
654 | static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) | ||
655 | { | ||
656 | int i, j = 0; | ||
657 | |||
658 | for (i = 0; i < BCMGENET_STATS_LEN; i++) { | ||
659 | const struct bcmgenet_stats *s; | ||
660 | u8 offset = 0; | ||
661 | u32 val = 0; | ||
662 | char *p; | ||
663 | |||
664 | s = &bcmgenet_gstrings_stats[i]; | ||
665 | switch (s->type) { | ||
666 | case BCMGENET_STAT_NETDEV: | ||
667 | continue; | ||
668 | case BCMGENET_STAT_MIB_RX: | ||
669 | case BCMGENET_STAT_MIB_TX: | ||
670 | case BCMGENET_STAT_RUNT: | ||
671 | if (s->type != BCMGENET_STAT_MIB_RX) | ||
672 | offset = BCMGENET_STAT_OFFSET; | ||
673 | val = bcmgenet_umac_readl(priv, UMAC_MIB_START + | ||
674 | j + offset); | ||
675 | break; | ||
676 | case BCMGENET_STAT_MISC: | ||
677 | val = bcmgenet_umac_readl(priv, s->reg_offset); | ||
678 | /* clear if overflowed */ | ||
679 | if (val == ~0) | ||
680 | bcmgenet_umac_writel(priv, 0, s->reg_offset); | ||
681 | break; | ||
682 | } | ||
683 | |||
684 | j += s->stat_sizeof; | ||
685 | p = (char *)priv + s->stat_offset; | ||
686 | *(u32 *)p = val; | ||
687 | } | ||
688 | } | ||
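/* Worked example of the offset arithmetic above: when the loop reaches
 * the first TX entry ("tx_64_octets"), j holds the accumulated size of
 * the RX MIB entries, and offset = BCMGENET_STAT_OFFSET (0xc) skips the
 * register gap, so the counter is read at UMAC_MIB_START + j + 0xc.
 */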
689 | |||
690 | static void bcmgenet_get_ethtool_stats(struct net_device *dev, | ||
691 | struct ethtool_stats *stats, | ||
692 | u64 *data) | ||
693 | { | ||
694 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
695 | int i; | ||
696 | |||
697 | if (netif_running(dev)) | ||
698 | bcmgenet_update_mib_counters(priv); | ||
699 | |||
700 | for (i = 0; i < BCMGENET_STATS_LEN; i++) { | ||
701 | const struct bcmgenet_stats *s; | ||
702 | char *p; | ||
703 | |||
704 | s = &bcmgenet_gstrings_stats[i]; | ||
705 | if (s->type == BCMGENET_STAT_NETDEV) | ||
706 | p = (char *)&dev->stats; | ||
707 | else | ||
708 | p = (char *)priv; | ||
709 | p += s->stat_offset; | ||
710 | data[i] = *(u32 *)p; | ||
711 | } | ||
712 | } | ||
713 | |||
714 | /* standard ethtool support functions. */ | ||
715 | static struct ethtool_ops bcmgenet_ethtool_ops = { | ||
716 | .get_strings = bcmgenet_get_strings, | ||
717 | .get_sset_count = bcmgenet_get_sset_count, | ||
718 | .get_ethtool_stats = bcmgenet_get_ethtool_stats, | ||
719 | .get_settings = bcmgenet_get_settings, | ||
720 | .set_settings = bcmgenet_set_settings, | ||
721 | .get_drvinfo = bcmgenet_get_drvinfo, | ||
722 | .get_link = ethtool_op_get_link, | ||
723 | .get_msglevel = bcmgenet_get_msglevel, | ||
724 | .set_msglevel = bcmgenet_set_msglevel, | ||
725 | }; | ||
726 | |||
727 | /* Power down the UniMAC, based on mode. */ | ||
728 | static void bcmgenet_power_down(struct bcmgenet_priv *priv, | ||
729 | enum bcmgenet_power_mode mode) | ||
730 | { | ||
731 | u32 reg; | ||
732 | |||
733 | switch (mode) { | ||
734 | case GENET_POWER_CABLE_SENSE: | ||
735 | if (priv->phydev) | ||
736 | phy_detach(priv->phydev); | ||
737 | break; | ||
738 | |||
739 | case GENET_POWER_PASSIVE: | ||
740 | /* Power down LED */ | ||
741 | bcmgenet_mii_reset(priv->dev); | ||
742 | if (priv->hw_params->flags & GENET_HAS_EXT) { | ||
743 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | ||
744 | reg |= (EXT_PWR_DOWN_PHY | | ||
745 | EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); | ||
746 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | ||
747 | } | ||
748 | break; | ||
749 | default: | ||
750 | break; | ||
751 | } | ||
752 | } | ||
753 | |||
754 | static void bcmgenet_power_up(struct bcmgenet_priv *priv, | ||
755 | enum bcmgenet_power_mode mode) | ||
756 | { | ||
757 | u32 reg; | ||
758 | |||
759 | if (!(priv->hw_params->flags & GENET_HAS_EXT)) | ||
760 | return; | ||
761 | |||
762 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | ||
763 | |||
764 | switch (mode) { | ||
765 | case GENET_POWER_PASSIVE: | ||
766 | reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY | | ||
767 | EXT_PWR_DOWN_BIAS); | ||
768 | /* fallthrough */ | ||
769 | case GENET_POWER_CABLE_SENSE: | ||
770 | /* enable APD */ | ||
771 | reg |= EXT_PWR_DN_EN_LD; | ||
772 | break; | ||
773 | default: | ||
774 | break; | ||
775 | } | ||
776 | |||
777 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | ||
778 | bcmgenet_mii_reset(priv->dev); | ||
779 | } | ||
780 | |||
781 | /* ioctl handler for special commands that are not present in ethtool. */ | ||
782 | static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
783 | { | ||
784 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
785 | int val = 0; | ||
786 | |||
787 | if (!netif_running(dev)) | ||
788 | return -EINVAL; | ||
789 | |||
790 | switch (cmd) { | ||
791 | case SIOCGMIIPHY: | ||
792 | case SIOCGMIIREG: | ||
793 | case SIOCSMIIREG: | ||
794 | if (!priv->phydev) | ||
795 | val = -ENODEV; | ||
796 | else | ||
797 | val = phy_mii_ioctl(priv->phydev, rq, cmd); | ||
798 | break; | ||
799 | |||
800 | default: | ||
801 | val = -EINVAL; | ||
802 | break; | ||
803 | } | ||
804 | |||
805 | return val; | ||
806 | } | ||
807 | |||
808 | static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, | ||
809 | struct bcmgenet_tx_ring *ring) | ||
810 | { | ||
811 | struct enet_cb *tx_cb_ptr; | ||
812 | |||
813 | tx_cb_ptr = ring->cbs; | ||
814 | tx_cb_ptr += ring->write_ptr - ring->cb_ptr; | ||
815 | tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE; | ||
816 | /* Advancing local write pointer */ | ||
817 | if (ring->write_ptr == ring->end_ptr) | ||
818 | ring->write_ptr = ring->cb_ptr; | ||
819 | else | ||
820 | ring->write_ptr++; | ||
821 | |||
822 | return tx_cb_ptr; | ||
823 | } | ||
824 | |||
825 | /* Simple helper to free a control block's resources */ | ||
826 | static void bcmgenet_free_cb(struct enet_cb *cb) | ||
827 | { | ||
828 | dev_kfree_skb_any(cb->skb); | ||
829 | cb->skb = NULL; | ||
830 | dma_unmap_addr_set(cb, dma_addr, 0); | ||
831 | } | ||
832 | |||
833 | static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv, | ||
834 | struct bcmgenet_tx_ring *ring) | ||
835 | { | ||
836 | bcmgenet_intrl2_0_writel(priv, | ||
837 | UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, | ||
838 | INTRL2_CPU_MASK_SET); | ||
839 | } | ||
840 | |||
841 | static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv, | ||
842 | struct bcmgenet_tx_ring *ring) | ||
843 | { | ||
844 | bcmgenet_intrl2_0_writel(priv, | ||
845 | UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, | ||
846 | INTRL2_CPU_MASK_CLEAR); | ||
847 | } | ||
848 | |||
849 | static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv, | ||
850 | struct bcmgenet_tx_ring *ring) | ||
851 | { | ||
852 | bcmgenet_intrl2_1_writel(priv, | ||
853 | (1 << ring->index), INTRL2_CPU_MASK_CLEAR); | ||
854 | priv->int1_mask &= ~(1 << ring->index); | ||
855 | } | ||
856 | |||
857 | static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, | ||
858 | struct bcmgenet_tx_ring *ring) | ||
859 | { | ||
860 | bcmgenet_intrl2_1_writel(priv, | ||
861 | (1 << ring->index), INTRL2_CPU_MASK_SET); | ||
862 | priv->int1_mask |= (1 << ring->index); | ||
863 | } | ||
864 | |||
865 | /* Unlocked version of the reclaim routine */ | ||
866 | static void __bcmgenet_tx_reclaim(struct net_device *dev, | ||
867 | struct bcmgenet_tx_ring *ring) | ||
868 | { | ||
869 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
870 | int last_tx_cn, last_c_index, num_tx_bds; | ||
871 | struct enet_cb *tx_cb_ptr; | ||
872 | unsigned int c_index; | ||
873 | |||
874 | /* Compute how many buffers were transmitted since the last xmit call */ | ||
875 | c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); | ||
876 | |||
877 | last_c_index = ring->c_index; | ||
878 | num_tx_bds = ring->size; | ||
879 | |||
880 | c_index &= (num_tx_bds - 1); | ||
881 | |||
882 | if (c_index >= last_c_index) | ||
883 | last_tx_cn = c_index - last_c_index; | ||
884 | else | ||
885 | last_tx_cn = num_tx_bds - last_c_index + c_index; | ||
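/* Example of the wraparound math: with num_tx_bds = 256,
 * last_c_index = 250 and a (masked) hardware c_index of 5, the
 * consumer index has wrapped, so last_tx_cn = 256 - 250 + 5 = 11
 * buffers are ready to be reclaimed.
 */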
886 | |||
887 | netif_dbg(priv, tx_done, dev, | ||
888 | "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n", | ||
889 | __func__, ring->index, | ||
890 | c_index, last_tx_cn, last_c_index); | ||
891 | |||
892 | /* Reclaim transmitted buffers */ | ||
893 | while (last_tx_cn-- > 0) { | ||
894 | tx_cb_ptr = ring->cbs + last_c_index; | ||
895 | if (tx_cb_ptr->skb) { | ||
896 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; | ||
897 | dma_unmap_single(&dev->dev, | ||
898 | dma_unmap_addr(tx_cb_ptr, dma_addr), | ||
899 | tx_cb_ptr->skb->len, | ||
900 | DMA_TO_DEVICE); | ||
901 | bcmgenet_free_cb(tx_cb_ptr); | ||
902 | } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { | ||
903 | dev->stats.tx_bytes += | ||
904 | dma_unmap_len(tx_cb_ptr, dma_len); | ||
905 | dma_unmap_page(&dev->dev, | ||
906 | dma_unmap_addr(tx_cb_ptr, dma_addr), | ||
907 | dma_unmap_len(tx_cb_ptr, dma_len), | ||
908 | DMA_TO_DEVICE); | ||
909 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); | ||
910 | } | ||
911 | dev->stats.tx_packets++; | ||
912 | ring->free_bds += 1; | ||
913 | |||
914 | last_c_index++; | ||
915 | last_c_index &= (num_tx_bds - 1); | ||
916 | } | ||
917 | |||
918 | if (ring->free_bds > (MAX_SKB_FRAGS + 1)) | ||
919 | ring->int_disable(priv, ring); | ||
920 | |||
921 | if (__netif_subqueue_stopped(dev, ring->queue)) | ||
922 | netif_wake_subqueue(dev, ring->queue); | ||
923 | |||
924 | ring->c_index = c_index; | ||
925 | } | ||
926 | |||
927 | static void bcmgenet_tx_reclaim(struct net_device *dev, | ||
928 | struct bcmgenet_tx_ring *ring) | ||
929 | { | ||
930 | unsigned long flags; | ||
931 | |||
932 | spin_lock_irqsave(&ring->lock, flags); | ||
933 | __bcmgenet_tx_reclaim(dev, ring); | ||
934 | spin_unlock_irqrestore(&ring->lock, flags); | ||
935 | } | ||
936 | |||
937 | static void bcmgenet_tx_reclaim_all(struct net_device *dev) | ||
938 | { | ||
939 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
940 | int i; | ||
941 | |||
942 | if (netif_is_multiqueue(dev)) { | ||
943 | for (i = 0; i < priv->hw_params->tx_queues; i++) | ||
944 | bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); | ||
945 | } | ||
946 | |||
947 | bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); | ||
948 | } | ||
949 | |||
950 | /* Transmits a single SKB (either the head of a fragment list or a | ||
951 | * single SKB); caller must hold priv->lock | ||
952 | */ | ||
953 | static int bcmgenet_xmit_single(struct net_device *dev, | ||
954 | struct sk_buff *skb, | ||
955 | u16 dma_desc_flags, | ||
956 | struct bcmgenet_tx_ring *ring) | ||
957 | { | ||
958 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
959 | struct device *kdev = &priv->pdev->dev; | ||
960 | struct enet_cb *tx_cb_ptr; | ||
961 | unsigned int skb_len; | ||
962 | dma_addr_t mapping; | ||
963 | u32 length_status; | ||
964 | int ret; | ||
965 | |||
966 | tx_cb_ptr = bcmgenet_get_txcb(priv, ring); | ||
967 | |||
968 | if (unlikely(!tx_cb_ptr)) | ||
969 | BUG(); | ||
970 | |||
971 | tx_cb_ptr->skb = skb; | ||
972 | |||
973 | skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb); | ||
974 | |||
975 | mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); | ||
976 | ret = dma_mapping_error(kdev, mapping); | ||
977 | if (ret) { | ||
978 | netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); | ||
979 | dev_kfree_skb(skb); | ||
980 | return ret; | ||
981 | } | ||
982 | |||
983 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); | ||
984 | dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len); | ||
985 | length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | | ||
986 | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) | | ||
987 | DMA_TX_APPEND_CRC; | ||
988 | |||
989 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
990 | length_status |= DMA_TX_DO_CSUM; | ||
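/* Illustration of the packing above: the buffer length occupies the
 * bits above DMA_BUFLENGTH_SHIFT, while SOP/EOP, the QTAG field and
 * DMA_TX_APPEND_CRC (plus DMA_TX_DO_CSUM for CHECKSUM_PARTIAL skbs)
 * sit in the lower bits of the same descriptor word.
 */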
991 | |||
992 | dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status); | ||
993 | |||
994 | /* Decrement total BD count and advance our write pointer */ | ||
995 | ring->free_bds -= 1; | ||
996 | ring->prod_index += 1; | ||
997 | ring->prod_index &= DMA_P_INDEX_MASK; | ||
998 | |||
999 | return 0; | ||
1000 | } | ||
1001 | |||
1002 | /* Transmit an SKB fragment */ | ||
1003 | static int bcmgenet_xmit_frag(struct net_device *dev, | ||
1004 | skb_frag_t *frag, | ||
1005 | u16 dma_desc_flags, | ||
1006 | struct bcmgenet_tx_ring *ring) | ||
1007 | { | ||
1008 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
1009 | struct device *kdev = &priv->pdev->dev; | ||
1010 | struct enet_cb *tx_cb_ptr; | ||
1011 | dma_addr_t mapping; | ||
1012 | int ret; | ||
1013 | |||
1014 | tx_cb_ptr = bcmgenet_get_txcb(priv, ring); | ||
1015 | |||
1016 | if (unlikely(!tx_cb_ptr)) | ||
1017 | BUG(); | ||
1018 | tx_cb_ptr->skb = NULL; | ||
1019 | |||
1020 | mapping = skb_frag_dma_map(kdev, frag, 0, | ||
1021 | skb_frag_size(frag), DMA_TO_DEVICE); | ||
1022 | ret = dma_mapping_error(kdev, mapping); | ||
1023 | if (ret) { | ||
1024 | netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", | ||
1025 | __func__); | ||
1026 | return ret; | ||
1027 | } | ||
1028 | |||
1029 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); | ||
1030 | dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size); | ||
1031 | |||
1032 | dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, | ||
1033 | (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | | ||
1034 | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); | ||
1035 | |||
1036 | |||
1037 | ring->free_bds -= 1; | ||
1038 | ring->prod_index += 1; | ||
1039 | ring->prod_index &= DMA_P_INDEX_MASK; | ||
1040 | |||
1041 | return 0; | ||
1042 | } | ||
1043 | |||
1044 | /* Reallocate the SKB to put enough headroom in front of it and insert | ||
1045 | * the transmit checksum offsets in the descriptors | ||
1046 | */ | ||
1047 | static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb) | ||
1048 | { | ||
1049 | struct status_64 *status = NULL; | ||
1050 | struct sk_buff *new_skb; | ||
1051 | u16 offset; | ||
1052 | u8 ip_proto; | ||
1053 | u16 ip_ver; | ||
1054 | u32 tx_csum_info; | ||
1055 | |||
1056 | if (unlikely(skb_headroom(skb) < sizeof(*status))) { | ||
1057 | /* If 64 byte status block enabled, must make sure skb has | ||
1058 | * enough headroom for us to insert 64B status block. | ||
1059 | */ | ||
1060 | new_skb = skb_realloc_headroom(skb, sizeof(*status)); | ||
1061 | dev_kfree_skb(skb); | ||
1062 | if (!new_skb) { | ||
1063 | dev->stats.tx_errors++; | ||
1064 | dev->stats.tx_dropped++; | ||
1065 | return -ENOMEM; | ||
1066 | } | ||
1067 | skb = new_skb; | ||
1068 | } | ||
1069 | |||
1070 | skb_push(skb, sizeof(*status)); | ||
1071 | status = (struct status_64 *)skb->data; | ||
1072 | |||
1073 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
1074 | ip_ver = ntohs(skb->protocol); | ||
1075 | switch (ip_ver) { | ||
1076 | case ETH_P_IP: | ||
1077 | ip_proto = ip_hdr(skb)->protocol; | ||
1078 | break; | ||
1079 | case ETH_P_IPV6: | ||
1080 | ip_proto = ipv6_hdr(skb)->nexthdr; | ||
1081 | break; | ||
1082 | default: | ||
1083 | return 0; | ||
1084 | } | ||
1085 | |||
1086 | offset = skb_checksum_start_offset(skb) - sizeof(*status); | ||
1087 | tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | | ||
1088 | (offset + skb->csum_offset); | ||
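/* Worked example: for a TCP/IPv4 skb, skb_checksum_start_offset() now
 * includes the 64-byte status block pushed above, so subtracting
 * sizeof(*status) yields the checksum start relative to the real
 * frame; offset + skb->csum_offset is where the hardware writes the
 * computed checksum (the TCP checksum field).
 */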
1089 | |||
1090 | /* Set the length valid bit for TCP and UDP and just set | ||
1091 | * the special UDP flag for IPv4, else just set to 0. | ||
1092 | */ | ||
1093 | if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { | ||
1094 | tx_csum_info |= STATUS_TX_CSUM_LV; | ||
1095 | if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) | ||
1096 | tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; | ||
1097 | } else | ||
1098 | tx_csum_info = 0; | ||
1099 | |||
1100 | status->tx_csum_info = tx_csum_info; | ||
1101 | } | ||
1102 | |||
1103 | return 0; | ||
1104 | } | ||
1105 | |||
1106 | static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) | ||
1107 | { | ||
1108 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
1109 | struct bcmgenet_tx_ring *ring = NULL; | ||
1110 | unsigned long flags = 0; | ||
1111 | int nr_frags, index; | ||
1112 | u16 dma_desc_flags; | ||
1113 | int ret; | ||
1114 | int i; | ||
1115 | |||
1116 | index = skb_get_queue_mapping(skb); | ||
1117 | /* Mapping strategy: | ||
1118 | * queue_mapping = 0, unclassified, packet transmitted through ring 16 | ||
1119 | * queue_mapping = 1, goes to ring 0 (highest priority queue) | ||
1120 | * queue_mapping = 2, goes to ring 1. | ||
1121 | * queue_mapping = 3, goes to ring 2. | ||
1122 | * queue_mapping = 4, goes to ring 3. | ||
1123 | */ | ||
1124 | if (index == 0) | ||
1125 | index = DESC_INDEX; | ||
1126 | else | ||
1127 | index -= 1; | ||
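/* Example: queue_mapping == 0 lands on the default ring (DESC_INDEX,
 * ring 16), while queue_mapping == 3 maps to priv->tx_rings[2], per
 * the strategy above.
 */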
1128 | |||
1129 | if ((index != DESC_INDEX) && (index > priv->hw_params->tx_queues - 1)) { | ||
1130 | netdev_err(dev, "%s: queue_mapping %d is invalid\n", | ||
1131 | __func__, skb_get_queue_mapping(skb)); | ||
1132 | dev->stats.tx_errors++; | ||
1133 | dev->stats.tx_dropped++; | ||
1134 | ret = NETDEV_TX_OK; | ||
1135 | goto out; | ||
1136 | } | ||
1137 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
1138 | ring = &priv->tx_rings[index]; | ||
1139 | |||
1140 | spin_lock_irqsave(&ring->lock, flags); | ||
1141 | if (ring->free_bds <= nr_frags + 1) { | ||
1142 | netif_stop_subqueue(dev, ring->queue); | ||
1143 | netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", | ||
1144 | __func__, index, ring->queue); | ||
1145 | ret = NETDEV_TX_BUSY; | ||
1146 | goto out; | ||
1147 | } | ||
1148 | |||
1149 | /* reclaim transmitted skbs every 8 packets. */ | ||
1150 | /*if (ring->free_bds < ring->size - 8)*/ | ||
1151 | /*__bcmgenet_tx_reclaim(dev, ring);*/ | ||
1152 | |||
1153 | /* set the SKB transmit checksum */ | ||
1154 | if (priv->desc_64b_en) { | ||
1155 | ret = bcmgenet_put_tx_csum(dev, skb); | ||
1156 | if (ret) { | ||
1157 | ret = NETDEV_TX_OK; | ||
1158 | goto out; | ||
1159 | } | ||
1160 | } | ||
1161 | |||
1162 | dma_desc_flags = DMA_SOP; | ||
1163 | if (nr_frags == 0) | ||
1164 | dma_desc_flags |= DMA_EOP; | ||
1165 | |||
1166 | /* Transmit single SKB or head of fragment list */ | ||
1167 | ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring); | ||
1168 | if (ret) { | ||
1169 | ret = NETDEV_TX_OK; | ||
1170 | goto out; | ||
1171 | } | ||
1172 | |||
1173 | /* xmit fragment */ | ||
1174 | for (i = 0; i < nr_frags; i++) { | ||
1175 | ret = bcmgenet_xmit_frag(dev, | ||
1176 | &skb_shinfo(skb)->frags[i], | ||
1177 | (i == nr_frags - 1) ? DMA_EOP : 0, ring); | ||
1178 | if (ret) { | ||
1179 | ret = NETDEV_TX_OK; | ||
1180 | goto out; | ||
1181 | } | ||
1182 | } | ||
1183 | |||
1184 | /* we kept a software copy of how much we should advance the TDMA | ||
1185 | * producer index, now write it down to the hardware | ||
1186 | */ | ||
1187 | bcmgenet_tdma_ring_writel(priv, ring->index, | ||
1188 | ring->prod_index, TDMA_PROD_INDEX); | ||
1189 | |||
1190 | if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { | ||
1191 | netif_stop_subqueue(dev, ring->queue); | ||
1192 | ring->int_enable(priv, ring); | ||
1193 | } | ||
1194 | |||
1195 | out: | ||
1196 | spin_unlock_irqrestore(&ring->lock, flags); | ||
1197 | |||
1198 | return ret; | ||
1199 | } | ||
1200 | |||
1201 | |||
1202 | static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, | ||
1203 | struct enet_cb *cb) | ||
1204 | { | ||
1205 | struct device *kdev = &priv->pdev->dev; | ||
1206 | struct sk_buff *skb; | ||
1207 | dma_addr_t mapping; | ||
1208 | int ret; | ||
1209 | |||
1210 | skb = netdev_alloc_skb(priv->dev, | ||
1211 | priv->rx_buf_len + SKB_ALIGNMENT); | ||
1212 | if (!skb) | ||
1213 | return -ENOMEM; | ||
1214 | |||
1215 | /* a caller did not release this control block */ | ||
1216 | WARN_ON(cb->skb != NULL); | ||
1217 | cb->skb = skb; | ||
1218 | mapping = dma_map_single(kdev, skb->data, | ||
1219 | priv->rx_buf_len, DMA_FROM_DEVICE); | ||
1220 | ret = dma_mapping_error(kdev, mapping); | ||
1221 | if (ret) { | ||
1222 | bcmgenet_free_cb(cb); | ||
1223 | netif_err(priv, rx_err, priv->dev, | ||
1224 | "%s DMA map failed\n", __func__); | ||
1225 | return ret; | ||
1226 | } | ||
1227 | |||
1228 | dma_unmap_addr_set(cb, dma_addr, mapping); | ||
1229 | /* assign packet, prepare descriptor, and advance pointer */ | ||
1230 | |||
1231 | dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping); | ||
1232 | |||
1233 | /* turn on the newly assigned BD for DMA to use */ | ||
1234 | priv->rx_bd_assign_index++; | ||
1235 | priv->rx_bd_assign_index &= (priv->num_rx_bds - 1); | ||
1236 | |||
1237 | priv->rx_bd_assign_ptr = priv->rx_bds + | ||
1238 | (priv->rx_bd_assign_index * DMA_DESC_SIZE); | ||
1239 | |||
1240 | return 0; | ||
1241 | } | ||
1242 | |||
1243 | /* bcmgenet_desc_rx - descriptor based rx process. | ||
1244 | * This can be called from a bottom half or from the NAPI polling method. | ||
1245 | */ | ||
1246 | static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | ||
1247 | unsigned int budget) | ||
1248 | { | ||
1249 | struct net_device *dev = priv->dev; | ||
1250 | struct enet_cb *cb; | ||
1251 | struct sk_buff *skb; | ||
1252 | u32 dma_length_status; | ||
1253 | unsigned long dma_flag; | ||
1254 | int len, err; | ||
1255 | unsigned int rxpktprocessed = 0, rxpkttoprocess; | ||
1256 | unsigned int p_index; | ||
1257 | unsigned int chksum_ok = 0; | ||
1258 | |||
1259 | p_index = bcmgenet_rdma_ring_readl(priv, | ||
1260 | DESC_INDEX, RDMA_PROD_INDEX); | ||
1261 | p_index &= DMA_P_INDEX_MASK; | ||
1262 | |||
1263 | if (p_index < priv->rx_c_index) | ||
1264 | rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - | ||
1265 | priv->rx_c_index + p_index; | ||
1266 | else | ||
1267 | rxpkttoprocess = p_index - priv->rx_c_index; | ||
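/* Example, assuming a 16-bit index (DMA_C_INDEX_MASK == 0xffff): with
 * rx_c_index = 0xfffe and p_index = 3, the producer has wrapped, so
 * rxpkttoprocess = 0x10000 - 0xfffe + 3 = 5 packets are pending.
 */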
1268 | |||
1269 | netif_dbg(priv, rx_status, dev, | ||
1270 | "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); | ||
1271 | |||
1272 | while ((rxpktprocessed < rxpkttoprocess) && | ||
1273 | (rxpktprocessed < budget)) { | ||
1274 | |||
1275 | /* Unmap the packet contents such that we can use the | ||
1276 | * RSV from the 64-byte descriptor when enabled and save | ||
1277 | * a 32-bit register read | ||
1278 | */ | ||
1279 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | ||
1280 | skb = cb->skb; | ||
1281 | dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), | ||
1282 | priv->rx_buf_len, DMA_FROM_DEVICE); | ||
1283 | |||
1284 | if (!priv->desc_64b_en) { | ||
1285 | dma_length_status = dmadesc_get_length_status(priv, | ||
1286 | priv->rx_bds + | ||
1287 | (priv->rx_read_ptr * | ||
1288 | DMA_DESC_SIZE)); | ||
1289 | } else { | ||
1290 | struct status_64 *status; | ||
1291 | status = (struct status_64 *)skb->data; | ||
1292 | dma_length_status = status->length_status; | ||
1293 | } | ||
1294 | |||
1295 | /* DMA flags and length are still valid no matter how | ||
1296 | * we got the Receive Status Vector (64B RSB or register) | ||
1297 | */ | ||
1298 | dma_flag = dma_length_status & 0xffff; | ||
1299 | len = dma_length_status >> DMA_BUFLENGTH_SHIFT; | ||
1300 | |||
1301 | netif_dbg(priv, rx_status, dev, | ||
1302 | "%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", | ||
1303 | __func__, p_index, priv->rx_c_index, priv->rx_read_ptr, | ||
1304 | dma_length_status); | ||
1305 | |||
1306 | rxpktprocessed++; | ||
1307 | |||
1308 | priv->rx_read_ptr++; | ||
1309 | priv->rx_read_ptr &= (priv->num_rx_bds - 1); | ||
1310 | |||
1311 | /* out of memory, just drop packets at the hardware level */ | ||
1312 | if (unlikely(!skb)) { | ||
1313 | dev->stats.rx_dropped++; | ||
1314 | dev->stats.rx_errors++; | ||
1315 | goto refill; | ||
1316 | } | ||
1317 | |||
1318 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { | ||
1319 | netif_err(priv, rx_status, dev, | ||
1320 | "Droping fragmented packet!\n"); | ||
1321 | dev->stats.rx_dropped++; | ||
1322 | dev->stats.rx_errors++; | ||
1323 | dev_kfree_skb_any(cb->skb); | ||
1324 | cb->skb = NULL; | ||
1325 | goto refill; | ||
1326 | } | ||
1327 | /* report errors */ | ||
1328 | if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | | ||
1329 | DMA_RX_OV | | ||
1330 | DMA_RX_NO | | ||
1331 | DMA_RX_LG | | ||
1332 | DMA_RX_RXER))) { | ||
1333 | netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", | ||
1334 | (unsigned int)dma_flag); | ||
1335 | if (dma_flag & DMA_RX_CRC_ERROR) | ||
1336 | dev->stats.rx_crc_errors++; | ||
1337 | if (dma_flag & DMA_RX_OV) | ||
1338 | dev->stats.rx_over_errors++; | ||
1339 | if (dma_flag & DMA_RX_NO) | ||
1340 | dev->stats.rx_frame_errors++; | ||
1341 | if (dma_flag & DMA_RX_LG) | ||
1342 | dev->stats.rx_length_errors++; | ||
1343 | dev->stats.rx_dropped++; | ||
1344 | dev->stats.rx_errors++; | ||
1345 | |||
1346 | /* discard the packet and advance consumer index.*/ | ||
1347 | dev_kfree_skb_any(cb->skb); | ||
1348 | cb->skb = NULL; | ||
1349 | goto refill; | ||
1350 | } /* error packet */ | ||
1351 | |||
1352 | chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && | ||
1353 | priv->desc_rxchk_en; | ||
1354 | |||
1355 | skb_put(skb, len); | ||
1356 | if (priv->desc_64b_en) { | ||
1357 | skb_pull(skb, 64); | ||
1358 | len -= 64; | ||
1359 | } | ||
1360 | |||
1361 | if (likely(chksum_ok)) | ||
1362 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1363 | |||
1364 | /* remove the 2 bytes the hardware added for IP alignment */ | ||
1365 | skb_pull(skb, 2); | ||
1366 | len -= 2; | ||
1367 | |||
1368 | if (priv->crc_fwd_en) { | ||
1369 | skb_trim(skb, len - ETH_FCS_LEN); | ||
1370 | len -= ETH_FCS_LEN; | ||
1371 | } | ||
1372 | |||
1373 | /* Finish setting up the received SKB and send it to the kernel */ | ||
1374 | skb->protocol = eth_type_trans(skb, priv->dev); | ||
1375 | dev->stats.rx_packets++; | ||
1376 | dev->stats.rx_bytes += len; | ||
1377 | if (dma_flag & DMA_RX_MULT) | ||
1378 | dev->stats.multicast++; | ||
1379 | |||
1380 | /* Notify kernel */ | ||
1381 | napi_gro_receive(&priv->napi, skb); | ||
1382 | cb->skb = NULL; | ||
1383 | netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); | ||
1384 | |||
1385 | /* refill RX path on the current control block */ | ||
1386 | refill: | ||
1387 | err = bcmgenet_rx_refill(priv, cb); | ||
1388 | if (err) | ||
1389 | netif_err(priv, rx_err, dev, "Rx refill failed\n"); | ||
1390 | } | ||
1391 | |||
1392 | return rxpktprocessed; | ||
1393 | } | ||
1394 | |||
1395 | /* Assign skb to RX DMA descriptor. */ | ||
1396 | static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv) | ||
1397 | { | ||
1398 | struct enet_cb *cb; | ||
1399 | int ret = 0; | ||
1400 | int i; | ||
1401 | |||
1402 | netif_dbg(priv, hw, priv->dev, "%s:\n", __func__); | ||
1403 | |||
1404 | /* loop here for each buffer needing assignment */ | ||
1405 | for (i = 0; i < priv->num_rx_bds; i++) { | ||
1406 | cb = &priv->rx_cbs[priv->rx_bd_assign_index]; | ||
1407 | if (cb->skb) | ||
1408 | continue; | ||
1409 | |||
1410 | /* set the DMA descriptor length once and for all | ||
1411 | * it will only change if we support dynamically sizing | ||
1412 | * priv->rx_buf_len, but we do not | ||
1413 | */ | ||
1414 | dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr, | ||
1415 | priv->rx_buf_len << DMA_BUFLENGTH_SHIFT); | ||
1416 | |||
1417 | ret = bcmgenet_rx_refill(priv, cb); | ||
1418 | if (ret) | ||
1419 | break; | ||
1420 | |||
1421 | } | ||
1422 | |||
1423 | return ret; | ||
1424 | } | ||
1425 | |||
1426 | static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) | ||
1427 | { | ||
1428 | struct enet_cb *cb; | ||
1429 | int i; | ||
1430 | |||
1431 | for (i = 0; i < priv->num_rx_bds; i++) { | ||
1432 | cb = &priv->rx_cbs[i]; | ||
1433 | |||
1434 | if (dma_unmap_addr(cb, dma_addr)) { | ||
1435 | dma_unmap_single(&priv->dev->dev, | ||
1436 | dma_unmap_addr(cb, dma_addr), | ||
1437 | priv->rx_buf_len, DMA_FROM_DEVICE); | ||
1438 | dma_unmap_addr_set(cb, dma_addr, 0); | ||
1439 | } | ||
1440 | |||
1441 | if (cb->skb) | ||
1442 | bcmgenet_free_cb(cb); | ||
1443 | } | ||
1444 | } | ||
1445 | |||
1446 | static int reset_umac(struct bcmgenet_priv *priv) | ||
1447 | { | ||
1448 | struct device *kdev = &priv->pdev->dev; | ||
1449 | unsigned int timeout = 0; | ||
1450 | u32 reg; | ||
1451 | |||
1452 | /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ | ||
1453 | bcmgenet_rbuf_ctrl_set(priv, 0); | ||
1454 | udelay(10); | ||
1455 | |||
1456 | /* disable MAC while updating its registers */ | ||
1457 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); | ||
1458 | |||
1459 | /* issue soft reset, wait for it to complete */ | ||
1460 | bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); | ||
1461 | while (timeout++ < 1000) { | ||
1462 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | ||
1463 | if (!(reg & CMD_SW_RESET)) | ||
1464 | return 0; | ||
1465 | |||
1466 | udelay(1); | ||
1467 | } | ||
1468 | |||
1469 | if (timeout >= 1000) { | ||
1470 | dev_err(kdev, | ||
1471 | "timeout waiting for MAC to come out of reset\n"); | ||
1472 | return -ETIMEDOUT; | ||
1473 | } | ||
1474 | |||
1475 | return 0; | ||
1476 | } | ||
1477 | |||
1478 | static int init_umac(struct bcmgenet_priv *priv) | ||
1479 | { | ||
1480 | struct device *kdev = &priv->pdev->dev; | ||
1481 | int ret; | ||
1482 | u32 reg, cpu_mask_clear; | ||
1483 | |||
1484 | dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); | ||
1485 | |||
1486 | ret = reset_umac(priv); | ||
1487 | if (ret) | ||
1488 | return ret; | ||
1489 | |||
1490 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); | ||
1491 | /* clear tx/rx counter */ | ||
1492 | bcmgenet_umac_writel(priv, | ||
1493 | MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL); | ||
1494 | bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); | ||
1495 | |||
1496 | bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); | ||
1497 | |||
1498 | /* init RX registers, enable IP header optimization */ | ||
1499 | reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); | ||
1500 | reg |= RBUF_ALIGN_2B; | ||
1501 | bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); | ||
1502 | |||
1503 | if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) | ||
1504 | bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); | ||
1505 | |||
1506 | /* Mask all interrupts.*/ | ||
1507 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | ||
1508 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | ||
1509 | bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | ||
1510 | |||
1511 | cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; | ||
1512 | |||
1513 | dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); | ||
1514 | |||
1515 | /* Monitor cable plug/unplug events for the internal PHY */ | ||
1516 | if (phy_is_internal(priv->phydev)) | ||
1517 | cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); | ||
1518 | else if (priv->ext_phy) | ||
1519 | cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); | ||
1520 | else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { | ||
1521 | reg = bcmgenet_bp_mc_get(priv); | ||
1522 | reg |= BIT(priv->hw_params->bp_in_en_shift); | ||
1523 | |||
1524 | /* bp_mask: back pressure mask */ | ||
1525 | if (netif_is_multiqueue(priv->dev)) | ||
1526 | reg |= priv->hw_params->bp_in_mask; | ||
1527 | else | ||
1528 | reg &= ~priv->hw_params->bp_in_mask; | ||
1529 | bcmgenet_bp_mc_set(priv, reg); | ||
1530 | } | ||
1531 | |||
1532 | /* Enable MDIO interrupts on GENET v3+ */ | ||
1533 | if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) | ||
1534 | cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR; | ||
1535 | |||
1536 | bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, | ||
1537 | INTRL2_CPU_MASK_CLEAR); | ||
1538 | |||
1539 | /* RX/TX engines are enabled later, in bcmgenet_open() */ | ||
1540 | dev_dbg(kdev, "done init umac\n"); | ||
1541 | |||
1542 | return 0; | ||
1543 | } | ||
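
The interrupt plumbing above leans on the intrl2 block's set/clear register convention: a write to INTRL2_CPU_MASK_SET masks (disables) the written bits, a write to INTRL2_CPU_MASK_CLEAR unmasks them, and a write to INTRL2_CPU_CLEAR acks latched status bits. In outline (a sketch of the sequence above, not new driver code):

	/* mask and ack everything, then unmask only what we want */
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);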
1544 | |||
1545 | /* Initialize all house-keeping variables for a TX ring, along | ||
1546 | * with corresponding hardware registers | ||
1547 | */ | ||
1548 | static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, | ||
1549 | unsigned int index, unsigned int size, | ||
1550 | unsigned int write_ptr, unsigned int end_ptr) | ||
1551 | { | ||
1552 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; | ||
1553 | u32 words_per_bd = WORDS_PER_BD(priv); | ||
1554 | u32 flow_period_val = 0; | ||
1555 | unsigned int first_bd; | ||
1556 | |||
1557 | spin_lock_init(&ring->lock); | ||
1558 | ring->index = index; | ||
1559 | if (index == DESC_INDEX) { | ||
1560 | ring->queue = 0; | ||
1561 | ring->int_enable = bcmgenet_tx_ring16_int_enable; | ||
1562 | ring->int_disable = bcmgenet_tx_ring16_int_disable; | ||
1563 | } else { | ||
1564 | ring->queue = index + 1; | ||
1565 | ring->int_enable = bcmgenet_tx_ring_int_enable; | ||
1566 | ring->int_disable = bcmgenet_tx_ring_int_disable; | ||
1567 | } | ||
1568 | ring->cbs = priv->tx_cbs + write_ptr; | ||
1569 | ring->size = size; | ||
1570 | ring->c_index = 0; | ||
1571 | ring->free_bds = size; | ||
1572 | ring->write_ptr = write_ptr; | ||
1573 | ring->cb_ptr = write_ptr; | ||
1574 | ring->end_ptr = end_ptr - 1; | ||
1575 | ring->prod_index = 0; | ||
1576 | |||
1577 | /* Set flow period for ring != 16 */ | ||
1578 | if (index != DESC_INDEX) | ||
1579 | flow_period_val = ENET_MAX_MTU_SIZE << 16; | ||
1580 | |||
1581 | bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); | ||
1582 | bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); | ||
1583 | bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); | ||
1584 | /* Disable rate control for now */ | ||
1585 | bcmgenet_tdma_ring_writel(priv, index, flow_period_val, | ||
1586 | TDMA_FLOW_PERIOD); | ||
1587 | /* Unclassified traffic goes to ring 16 */ | ||
1588 | bcmgenet_tdma_ring_writel(priv, index, | ||
1589 | ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), | ||
1590 | DMA_RING_BUF_SIZE); | ||
1591 | |||
1592 | first_bd = write_ptr; | ||
1593 | |||
1594 | /* Set start and end address, read and write pointers */ | ||
1595 | bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, | ||
1596 | DMA_START_ADDR); | ||
1597 | bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, | ||
1598 | TDMA_READ_PTR); | ||
1599 | bcmgenet_tdma_ring_writel(priv, index, first_bd, | ||
1600 | TDMA_WRITE_PTR); | ||
1601 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, | ||
1602 | DMA_END_ADDR); | ||
1603 | } | ||
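
The ring registers are programmed in units of descriptor words, so the buffer-descriptor indices write_ptr and end_ptr are scaled by WORDS_PER_BD. A small sketch of that arithmetic; the struct and helper are invented for illustration, and the worked values assume ring 0, size 32, 2 words per BD as on GENET V1-V3:

struct tx_ring_layout {
	u32 start, read_ptr, write_ptr, end;
};

static struct tx_ring_layout tx_ring_regs(u32 write_ptr, u32 end_ptr,
					  u32 words_per_bd)
{
	struct tx_ring_layout l = {
		.start     = write_ptr * words_per_bd,   /* DMA_START_ADDR */
		.read_ptr  = write_ptr * words_per_bd,   /* TDMA_READ_PTR  */
		.write_ptr = write_ptr,                  /* TDMA_WRITE_PTR */
		.end       = end_ptr * words_per_bd - 1, /* DMA_END_ADDR   */
	};

	/* ring 0, size 32, 2 words/BD: start = 0, end = 63 */
	return l;
}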
1604 | |||
1605 | /* Initialize an RDMA ring */ | ||
1606 | static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, | ||
1607 | unsigned int index, unsigned int size) | ||
1608 | { | ||
1609 | u32 words_per_bd = WORDS_PER_BD(priv); | ||
1610 | int ret; | ||
1611 | |||
1612 | priv->num_rx_bds = TOTAL_DESC; | ||
1613 | priv->rx_bds = priv->base + priv->hw_params->rdma_offset; | ||
1614 | priv->rx_bd_assign_ptr = priv->rx_bds; | ||
1615 | priv->rx_bd_assign_index = 0; | ||
1616 | priv->rx_c_index = 0; | ||
1617 | priv->rx_read_ptr = 0; | ||
1618 | priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), | ||
1619 | GFP_KERNEL); | ||
1620 | if (!priv->rx_cbs) | ||
1621 | return -ENOMEM; | ||
1622 | |||
1623 | ret = bcmgenet_alloc_rx_buffers(priv); | ||
1624 | if (ret) { | ||
1625 | kfree(priv->rx_cbs); | ||
1626 | return ret; | ||
1627 | } | ||
1628 | |||
1629 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR); | ||
1630 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); | ||
1631 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); | ||
1632 | bcmgenet_rdma_ring_writel(priv, index, | ||
1633 | ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), | ||
1634 | DMA_RING_BUF_SIZE); | ||
1635 | bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR); | ||
1636 | bcmgenet_rdma_ring_writel(priv, index, | ||
1637 | words_per_bd * size - 1, DMA_END_ADDR); | ||
1638 | bcmgenet_rdma_ring_writel(priv, index, | ||
1639 | (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) | | ||
1640 | DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); | ||
1641 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR); | ||
1642 | |||
1643 | return ret; | ||
1644 | } | ||
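
The flow-control write above packs two watermarks into RDMA_XON_XOFF_THRESH. A sketch of just the bit layout, leaving the watermark semantics to the register name (the helper is illustrative, not a driver function):

static u32 pack_xon_xoff_thresh(u32 thresh_lo, u32 thresh_hi)
{
	/* DMA_FC_THRESH_LO occupies the field at DMA_XOFF_THRESHOLD_SHIFT,
	 * DMA_FC_THRESH_HI the low-order bits.
	 */
	return (thresh_lo << DMA_XOFF_THRESHOLD_SHIFT) | thresh_hi;
}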
1645 | |||
1646 | /* init multi xmit queues, only available for GENET2+ | ||
1647 | * the queue is partitioned as follows: | ||
1648 | * | ||
1649 | * queue 0 - 3 is priority based, each one has 32 descriptors, | ||
1650 | * with queue 0 being the highest priority queue. | ||
1651 | * | ||
1652 | * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT | ||
1653 | * descriptors: 256 - (number of tx queues * bds per queue) = 128 | ||
1654 | * descriptors. | ||
1655 | * | ||
1656 | * The transmit control block pool is then partitioned as follows: | ||
1657 | * - tx_rings[0].cbs points to tx_cbs[0..31] | ||
1658 | * - tx_rings[1].cbs points to tx_cbs[32..63] | ||
1659 | * - tx_rings[2].cbs points to tx_cbs[64..95] | ||
1660 | * - tx_rings[3].cbs points to tx_cbs[96..127] | ||
1661 | * - tx_cbs[128..255] are left for the default queue 16 | ||
1662 | */ | ||
1663 | static void bcmgenet_init_multiq(struct net_device *dev) | ||
1664 | { | ||
1665 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
1666 | unsigned int i, dma_enable; | ||
1667 | u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0; | ||
1668 | |||
1669 | if (!netif_is_multiqueue(dev)) { | ||
1670 | netdev_warn(dev, "called on non-multiqueue-aware HW\n"); | ||
1671 | return; | ||
1672 | } | ||
1673 | |||
1674 | dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); | ||
1675 | dma_enable = dma_ctrl & DMA_EN; | ||
1676 | dma_ctrl &= ~DMA_EN; | ||
1677 | bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); | ||
1678 | |||
1679 | /* Enable strict priority arbiter mode */ | ||
1680 | bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); | ||
1681 | |||
1682 | for (i = 0; i < priv->hw_params->tx_queues; i++) { | ||
1683 | /* ring i claims tx_cbs[i * bds_cnt .. (i + 1) * bds_cnt - 1]; | ||
1684 | * the remainder is reserved for the default tx queue (ring 16) | ||
1685 | */ | ||
1686 | bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt, | ||
1687 | i * priv->hw_params->bds_cnt, | ||
1688 | (i + 1) * priv->hw_params->bds_cnt); | ||
1689 | |||
1690 | /* Configure ring as descriptor ring and set up its priority */ | ||
1691 | ring_cfg |= 1 << i; | ||
1692 | dma_priority |= ((GENET_Q0_PRIORITY + i) << | ||
1693 | (GENET_MAX_MQ_CNT + 1) * i); | ||
1694 | dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT); | ||
1695 | } | ||
1696 | |||
1697 | /* Enable rings */ | ||
1698 | reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG); | ||
1699 | reg |= ring_cfg; | ||
1700 | bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG); | ||
1701 | |||
1702 | /* Use configured rings priority and set ring #16 priority */ | ||
1703 | reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY); | ||
1704 | reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20); | ||
1705 | reg |= dma_priority; | ||
1706 | bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY); | ||
1707 | |||
1708 | /* Configure ring as descriptor ring and re-enable DMA if enabled */ | ||
1709 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | ||
1710 | reg |= dma_ctrl; | ||
1711 | if (dma_enable) | ||
1712 | reg |= DMA_EN; | ||
1713 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | ||
1714 | } | ||
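
The dma_priority accumulation in the loop gives every ring a priority field (GENET_MAX_MQ_CNT + 1) = 5 bits wide, with ring i at bits [5*i, 5*i + 4] and ring 16's field starting at bit 20. A standalone sketch of the packing, where q0_prio stands in for GENET_Q0_PRIORITY:

static u32 pack_tx_ring_priorities(u32 q0_prio, unsigned int tx_queues)
{
	u32 dma_priority = 0;
	unsigned int i;

	/* each of the tx_queues rings takes a 5-bit field */
	for (i = 0; i < tx_queues; i++)
		dma_priority |= (q0_prio + i) << (5 * i);

	/* the default ring 16 gets the next priority value, at bit 20 */
	dma_priority |= (q0_prio + tx_queues) << 20;

	return dma_priority;
}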
1715 | |||
1716 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) | ||
1717 | { | ||
1718 | int i; | ||
1719 | |||
1720 | /* disable DMA */ | ||
1721 | bcmgenet_rdma_writel(priv, 0, DMA_CTRL); | ||
1722 | bcmgenet_tdma_writel(priv, 0, DMA_CTRL); | ||
1723 | |||
1724 | for (i = 0; i < priv->num_tx_bds; i++) { | ||
1725 | if (priv->tx_cbs[i].skb != NULL) { | ||
1726 | dev_kfree_skb(priv->tx_cbs[i].skb); | ||
1727 | priv->tx_cbs[i].skb = NULL; | ||
1728 | } | ||
1729 | } | ||
1730 | |||
1731 | bcmgenet_free_rx_buffers(priv); | ||
1732 | kfree(priv->rx_cbs); | ||
1733 | kfree(priv->tx_cbs); | ||
1734 | } | ||
1735 | |||
1736 | /* init_edma: Initialize DMA control register */ | ||
1737 | static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | ||
1738 | { | ||
1739 | int ret; | ||
1740 | |||
1741 | netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n"); | ||
1742 | |||
1743 | /* by default, enable ring 16 (descriptor based) */ | ||
1744 | ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC); | ||
1745 | if (ret) { | ||
1746 | netdev_err(priv->dev, "failed to initialize RX ring\n"); | ||
1747 | return ret; | ||
1748 | } | ||
1749 | |||
1750 | /* init rDMA */ | ||
1751 | bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); | ||
1752 | |||
1753 | /* init tDMA */ | ||
1754 | bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); | ||
1755 | |||
1756 | /* Initialize common TX ring structures */ | ||
1757 | priv->tx_bds = priv->base + priv->hw_params->tdma_offset; | ||
1758 | priv->num_tx_bds = TOTAL_DESC; | ||
1759 | priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), | ||
1760 | GFP_KERNEL); | ||
1761 | if (!priv->tx_cbs) { | ||
1762 | bcmgenet_fini_dma(priv); | ||
1763 | return -ENOMEM; | ||
1764 | } | ||
1765 | |||
1766 | /* initialize multi xmit queue */ | ||
1767 | bcmgenet_init_multiq(priv->dev); | ||
1768 | |||
1769 | /* initialize special ring 16 */ | ||
1770 | bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT, | ||
1771 | priv->hw_params->tx_queues * priv->hw_params->bds_cnt, | ||
1772 | TOTAL_DESC); | ||
1773 | |||
1774 | return 0; | ||
1775 | } | ||
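
The descriptor budget being relied on here: with the GENET V2+ parameters (4 TX queues of 32 BDs each), 256 - 128 = 128 descriptors remain for ring 16, which is what GENET_DEFAULT_BD_CNT has to equal. A hypothetical compile-time guard (not in the original driver) could make that explicit at the top of this function:

	/* assumed invariant: priority rings + default ring cover all BDs */
	BUILD_BUG_ON(4 * 32 + GENET_DEFAULT_BD_CNT != TOTAL_DESC);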
1776 | |||
1777 | /* NAPI polling method */ | ||
1778 | static int bcmgenet_poll(struct napi_struct *napi, int budget) | ||
1779 | { | ||
1780 | struct bcmgenet_priv *priv = container_of(napi, | ||
1781 | struct bcmgenet_priv, napi); | ||
1782 | unsigned int work_done; | ||
1783 | |||
1784 | /* tx reclaim */ | ||
1785 | bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); | ||
1786 | |||
1787 | work_done = bcmgenet_desc_rx(priv, budget); | ||
1788 | |||
1789 | /* Advance our consumer index */ | ||
1790 | priv->rx_c_index += work_done; | ||
1791 | priv->rx_c_index &= DMA_C_INDEX_MASK; | ||
1792 | bcmgenet_rdma_ring_writel(priv, DESC_INDEX, | ||
1793 | priv->rx_c_index, RDMA_CONS_INDEX); | ||
1794 | if (work_done < budget) { | ||
1795 | napi_complete(napi); | ||
1796 | bcmgenet_intrl2_0_writel(priv, | ||
1797 | UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR); | ||
1798 | } | ||
1799 | |||
1800 | return work_done; | ||
1801 | } | ||
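
bcmgenet_poll() follows the standard NAPI contract: returning budget keeps the poller scheduled, while completing under budget calls napi_complete() and unmasks the RXDMA_BDONE interrupt that bcmgenet_isr0() masked when scheduling NAPI. The generic shape (a sketch of the contract, not new driver code):

	work_done = process_rx(budget);
	if (work_done < budget) {
		napi_complete(napi);	/* done polling for now */
		unmask_rx_interrupt();	/* here: INTRL2_CPU_MASK_CLEAR */
	}
	return work_done;		/* == budget keeps us in polled mode */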
1802 | |||
1803 | /* Interrupt bottom half */ | ||
1804 | static void bcmgenet_irq_task(struct work_struct *work) | ||
1805 | { | ||
1806 | struct bcmgenet_priv *priv = container_of( | ||
1807 | work, struct bcmgenet_priv, bcmgenet_irq_work); | ||
1808 | |||
1809 | netif_dbg(priv, intr, priv->dev, "%s\n", __func__); | ||
1810 | |||
1811 | /* Link UP/DOWN event */ | ||
1812 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && | ||
1813 | (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) { | ||
1814 | if (priv->phydev) | ||
1815 | phy_mac_interrupt(priv->phydev, | ||
1816 | (priv->irq0_stat & UMAC_IRQ_LINK_UP)); | ||
1817 | priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN); | ||
1818 | } | ||
1819 | } | ||
1820 | |||
1821 | /* bcmgenet_isr1: interrupt handler for ring buffer. */ | ||
1822 | static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) | ||
1823 | { | ||
1824 | struct bcmgenet_priv *priv = dev_id; | ||
1825 | unsigned int index; | ||
1826 | |||
1827 | /* Save irq status for bottom-half processing. */ | ||
1828 | priv->irq1_stat = | ||
1829 | bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & | ||
1830 | ~priv->int1_mask; | ||
1831 | /* clear interrupts */ | ||
1832 | bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); | ||
1833 | |||
1834 | netif_dbg(priv, intr, priv->dev, | ||
1835 | "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); | ||
1836 | /* Check the MBDONE interrupts: | ||
1837 | * a packet is done, reclaim its descriptors | ||
1838 | */ | ||
1839 | if (priv->irq1_stat & 0x0000ffff) { | ||
1840 | /* reclaim every tx ring whose completion bit fired */ | ||
1841 | for (index = 0; index < 16; index++) { | ||
1842 | if (priv->irq1_stat & (1 << index)) | ||
1843 | bcmgenet_tx_reclaim(priv->dev, | ||
1844 | &priv->tx_rings[index]); | ||
1845 | } | ||
1846 | } | ||
1847 | return IRQ_HANDLED; | ||
1848 | } | ||
1849 | |||
1850 | /* bcmgenet_isr0: Handle various interrupts. */ | ||
1851 | static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) | ||
1852 | { | ||
1853 | struct bcmgenet_priv *priv = dev_id; | ||
1854 | |||
1855 | /* Save irq status for bottom-half processing. */ | ||
1856 | priv->irq0_stat = | ||
1857 | bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & | ||
1858 | ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); | ||
1859 | /* clear interrupts */ | ||
1860 | bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); | ||
1861 | |||
1862 | netif_dbg(priv, intr, priv->dev, | ||
1863 | "IRQ=0x%x\n", priv->irq0_stat); | ||
1864 | |||
1865 | if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) { | ||
1866 | /* We use NAPI (software interrupt throttling) when | ||
1867 | * RX descriptor throttling is not used. | ||
1868 | * Disable the interrupt here; it is re-enabled in the poll method. | ||
1869 | */ | ||
1870 | if (likely(napi_schedule_prep(&priv->napi))) { | ||
1871 | bcmgenet_intrl2_0_writel(priv, | ||
1872 | UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET); | ||
1873 | __napi_schedule(&priv->napi); | ||
1874 | } | ||
1875 | } | ||
1876 | if (priv->irq0_stat & | ||
1877 | (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { | ||
1878 | /* Tx reclaim */ | ||
1879 | bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); | ||
1880 | } | ||
1881 | if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | | ||
1882 | UMAC_IRQ_PHY_DET_F | | ||
1883 | UMAC_IRQ_LINK_UP | | ||
1884 | UMAC_IRQ_LINK_DOWN | | ||
1885 | UMAC_IRQ_HFB_SM | | ||
1886 | UMAC_IRQ_HFB_MM | | ||
1887 | UMAC_IRQ_MPD_R)) { | ||
1888 | /* all other interrupts of interest are handled in the bottom half */ | ||
1889 | schedule_work(&priv->bcmgenet_irq_work); | ||
1890 | } | ||
1891 | |||
1892 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && | ||
1893 | priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { | ||
1894 | priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); | ||
1895 | wake_up(&priv->wq); | ||
1896 | } | ||
1897 | |||
1898 | return IRQ_HANDLED; | ||
1899 | } | ||
1900 | |||
1901 | static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) | ||
1902 | { | ||
1903 | u32 reg; | ||
1904 | |||
1905 | reg = bcmgenet_rbuf_ctrl_get(priv); | ||
1906 | reg |= BIT(1); | ||
1907 | bcmgenet_rbuf_ctrl_set(priv, reg); | ||
1908 | udelay(10); | ||
1909 | |||
1910 | reg &= ~BIT(1); | ||
1911 | bcmgenet_rbuf_ctrl_set(priv, reg); | ||
1912 | udelay(10); | ||
1913 | } | ||
1914 | |||
1915 | static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, | ||
1916 | unsigned char *addr) | ||
1917 | { | ||
1918 | bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | | ||
1919 | (addr[2] << 8) | addr[3], UMAC_MAC0); | ||
1920 | bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); | ||
1921 | } | ||
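
The two writes split the six address bytes across UMAC_MAC0/UMAC_MAC1, most significant byte first. A worked example with a made-up address:

	/* addr = 00:10:18:aa:bb:cc (illustrative only):
	 * UMAC_MAC0 = (0x00 << 24) | (0x10 << 16) | (0x18 << 8) | 0xaa
	 *           = 0x001018aa
	 * UMAC_MAC1 = (0xbb << 8) | 0xcc = 0x0000bbcc
	 */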
1922 | |||
1923 | static int bcmgenet_wol_resume(struct bcmgenet_priv *priv) | ||
1924 | { | ||
1925 | int ret; | ||
1926 | |||
1927 | /* From WOL-enabled suspend, switch to regular clock */ | ||
1928 | clk_disable(priv->clk_wol); | ||
1929 | /* init umac registers to synchronize s/w with h/w */ | ||
1930 | ret = init_umac(priv); | ||
1931 | if (ret) | ||
1932 | return ret; | ||
1933 | |||
1934 | if (priv->phydev) | ||
1935 | phy_init_hw(priv->phydev); | ||
1936 | /* Speed settings must be restored */ | ||
1937 | bcmgenet_mii_config(priv->dev); | ||
1938 | |||
1939 | return 0; | ||
1940 | } | ||
1941 | |||
1942 | /* Returns a reusable dma control register value */ | ||
1943 | static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) | ||
1944 | { | ||
1945 | u32 reg; | ||
1946 | u32 dma_ctrl; | ||
1947 | |||
1948 | /* disable DMA */ | ||
1949 | dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; | ||
1950 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | ||
1951 | reg &= ~dma_ctrl; | ||
1952 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | ||
1953 | |||
1954 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | ||
1955 | reg &= ~dma_ctrl; | ||
1956 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | ||
1957 | |||
1958 | bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); | ||
1959 | udelay(10); | ||
1960 | bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); | ||
1961 | |||
1962 | return dma_ctrl; | ||
1963 | } | ||
1964 | |||
1965 | static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) | ||
1966 | { | ||
1967 | u32 reg; | ||
1968 | |||
1969 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | ||
1970 | reg |= dma_ctrl; | ||
1971 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | ||
1972 | |||
1973 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | ||
1974 | reg |= dma_ctrl; | ||
1975 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | ||
1976 | } | ||
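
bcmgenet_dma_disable() and bcmgenet_enable_dma() are a pair: the disable path returns exactly the control bits it cleared so the caller can hand them back once the rings are rebuilt. The sequence in bcmgenet_open() below has this shape:

	/* disable/init/re-enable, as done in bcmgenet_open() */
	dma_ctrl = bcmgenet_dma_disable(priv);
	ret = bcmgenet_init_dma(priv);
	if (!ret)
		bcmgenet_enable_dma(priv, dma_ctrl);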
1977 | |||
1978 | static int bcmgenet_open(struct net_device *dev) | ||
1979 | { | ||
1980 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
1981 | unsigned long dma_ctrl; | ||
1982 | u32 reg; | ||
1983 | int ret; | ||
1984 | |||
1985 | netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); | ||
1986 | |||
1987 | /* Turn on the clock */ | ||
1988 | if (!IS_ERR(priv->clk)) | ||
1989 | clk_prepare_enable(priv->clk); | ||
1990 | |||
1991 | /* take MAC out of reset */ | ||
1992 | bcmgenet_umac_reset(priv); | ||
1993 | |||
1994 | ret = init_umac(priv); | ||
1995 | if (ret) | ||
1996 | goto err_clk_disable; | ||
1997 | |||
1998 | /* disable ethernet MAC while updating its registers */ | ||
1999 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | ||
2000 | reg &= ~(CMD_TX_EN | CMD_RX_EN); | ||
2001 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
2002 | |||
2003 | bcmgenet_set_hw_addr(priv, dev->dev_addr); | ||
2004 | |||
2005 | if (priv->wol_enabled) { | ||
2006 | ret = bcmgenet_wol_resume(priv); | ||
2007 | if (ret) | ||
2008 | goto err_clk_disable; | ||
2009 | } | ||
2010 | |||
2011 | if (phy_is_internal(priv->phydev)) { | ||
2012 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | ||
2013 | reg |= EXT_ENERGY_DET_MASK; | ||
2014 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | ||
2015 | } | ||
2016 | |||
2017 | /* Disable RX/TX DMA and flush TX queues */ | ||
2018 | dma_ctrl = bcmgenet_dma_disable(priv); | ||
2019 | |||
2020 | /* Reinitialize TDMA and RDMA and SW housekeeping */ | ||
2021 | ret = bcmgenet_init_dma(priv); | ||
2022 | if (ret) { | ||
2023 | netdev_err(dev, "failed to initialize DMA\n"); | ||
2024 | goto err_fini_dma; | ||
2025 | } | ||
2026 | |||
2027 | /* Always enable ring 16 - descriptor ring */ | ||
2028 | bcmgenet_enable_dma(priv, dma_ctrl); | ||
2029 | |||
2030 | ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, | ||
2031 | dev->name, priv); | ||
2032 | if (ret < 0) { | ||
2033 | netdev_err(dev, "can't request IRQ %d\n", priv->irq0); | ||
2034 | goto err_fini_dma; | ||
2035 | } | ||
2036 | |||
2037 | ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, | ||
2038 | dev->name, priv); | ||
2039 | if (ret < 0) { | ||
2040 | netdev_err(dev, "can't request IRQ %d\n", priv->irq1); | ||
2041 | goto err_irq0; | ||
2042 | } | ||
2043 | |||
2044 | /* Start the network engine */ | ||
2045 | napi_enable(&priv->napi); | ||
2046 | |||
2047 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | ||
2048 | reg |= (CMD_TX_EN | CMD_RX_EN); | ||
2049 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
2050 | |||
2051 | /* Make sure we reflect the value of CRC_CMD_FWD */ | ||
2052 | priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); | ||
2053 | |||
2054 | device_set_wakeup_capable(&dev->dev, 1); | ||
2055 | |||
2056 | if (phy_is_internal(priv->phydev)) | ||
2057 | bcmgenet_power_up(priv, GENET_POWER_PASSIVE); | ||
2058 | |||
2059 | netif_tx_start_all_queues(dev); | ||
2060 | |||
2061 | if (priv->phydev) | ||
2062 | phy_start(priv->phydev); | ||
2063 | |||
2064 | return 0; | ||
2065 | |||
2066 | err_irq0: | ||
2067 | free_irq(priv->irq0, priv); /* matches the dev_id passed above */ | ||
2068 | err_fini_dma: | ||
2069 | bcmgenet_fini_dma(priv); | ||
2070 | err_clk_disable: | ||
2071 | if (!IS_ERR(priv->clk)) | ||
2072 | clk_disable_unprepare(priv->clk); | ||
2073 | return ret; | ||
2074 | } | ||
2075 | |||
2076 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | ||
2077 | { | ||
2078 | int ret = 0; | ||
2079 | int timeout = 0; | ||
2080 | u32 reg; | ||
2081 | |||
2082 | /* Disable TDMA to stop adding more frames to the TX DMA */ | ||
2083 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | ||
2084 | reg &= ~DMA_EN; | ||
2085 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | ||
2086 | |||
2087 | /* Check TDMA status register to confirm TDMA is disabled */ | ||
2088 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
2089 | reg = bcmgenet_tdma_readl(priv, DMA_STATUS); | ||
2090 | if (reg & DMA_DISABLED) | ||
2091 | break; | ||
2092 | |||
2093 | udelay(1); | ||
2094 | } | ||
2095 | |||
2096 | if (timeout > DMA_TIMEOUT_VAL) { | ||
2097 | netdev_warn(priv->dev, | ||
2098 | "Timed out while disabling TX DMA\n"); | ||
2099 | ret = -ETIMEDOUT; | ||
2100 | } | ||
2101 | |||
2102 | /* Wait 10ms for packet drain in both tx and rx dma */ | ||
2103 | usleep_range(10000, 20000); | ||
2104 | |||
2105 | /* Disable RDMA */ | ||
2106 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | ||
2107 | reg &= ~DMA_EN; | ||
2108 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | ||
2109 | |||
2110 | timeout = 0; | ||
2111 | /* Check RDMA status register to confirm RDMA is disabled */ | ||
2112 | while (timeout++ < DMA_TIMEOUT_VAL) { | ||
2113 | reg = bcmgenet_rdma_readl(priv, DMA_STATUS); | ||
2114 | if (reg & DMA_DISABLED) | ||
2115 | break; | ||
2116 | |||
2117 | udelay(1); | ||
2118 | } | ||
2119 | |||
2120 | if (timeout > DMA_TIMEOUT_VAL) { | ||
2121 | netdev_warn(priv->dev, | ||
2122 | "Timed out while disabling RX DMA\n"); | ||
2123 | ret = -ETIMEDOUT; | ||
2124 | } | ||
2125 | |||
2126 | return ret; | ||
2127 | } | ||
2128 | |||
2129 | static int bcmgenet_close(struct net_device *dev) | ||
2130 | { | ||
2131 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
2132 | int ret; | ||
2133 | u32 reg; | ||
2134 | |||
2135 | netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); | ||
2136 | |||
2137 | if (priv->phydev) | ||
2138 | phy_stop(priv->phydev); | ||
2139 | |||
2140 | /* Disable MAC receive */ | ||
2141 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | ||
2142 | reg &= ~CMD_RX_EN; | ||
2143 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
2144 | |||
2145 | netif_tx_stop_all_queues(dev); | ||
2146 | |||
2147 | ret = bcmgenet_dma_teardown(priv); | ||
2148 | if (ret) | ||
2149 | return ret; | ||
2150 | |||
2151 | /* Disable MAC transmit. TX DMA must be disabled before this. */ | ||
2152 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | ||
2153 | reg &= ~CMD_TX_EN; | ||
2154 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
2155 | |||
2156 | napi_disable(&priv->napi); | ||
2157 | |||
2158 | /* tx reclaim */ | ||
2159 | bcmgenet_tx_reclaim_all(dev); | ||
2160 | bcmgenet_fini_dma(priv); | ||
2161 | |||
2162 | free_irq(priv->irq0, priv); | ||
2163 | free_irq(priv->irq1, priv); | ||
2164 | |||
2165 | /* Wait for pending work items to complete - we are stopping | ||
2166 | * the clock now. Since interrupts are disabled, no new work | ||
2167 | * will be scheduled. | ||
2168 | */ | ||
2169 | cancel_work_sync(&priv->bcmgenet_irq_work); | ||
2170 | |||
2171 | if (phy_is_internal(priv->phydev)) | ||
2172 | bcmgenet_power_down(priv, GENET_POWER_PASSIVE); | ||
2173 | |||
2174 | if (priv->wol_enabled) | ||
2175 | clk_enable(priv->clk_wol); | ||
2176 | |||
2177 | if (!IS_ERR(priv->clk)) | ||
2178 | clk_disable_unprepare(priv->clk); | ||
2179 | |||
2180 | return 0; | ||
2181 | } | ||
2182 | |||
2183 | static void bcmgenet_timeout(struct net_device *dev) | ||
2184 | { | ||
2185 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
2186 | |||
2187 | netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); | ||
2188 | |||
2189 | dev->trans_start = jiffies; | ||
2190 | |||
2191 | dev->stats.tx_errors++; | ||
2192 | |||
2193 | netif_tx_wake_all_queues(dev); | ||
2194 | } | ||
2195 | |||
2196 | #define MAX_MC_COUNT 16 | ||
2197 | |||
2198 | static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, | ||
2199 | unsigned char *addr, | ||
2200 | int *i, | ||
2201 | int *mc) | ||
2202 | { | ||
2203 | u32 reg; | ||
2204 | |||
2205 | bcmgenet_umac_writel(priv, | ||
2206 | addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4)); | ||
2207 | bcmgenet_umac_writel(priv, | ||
2208 | addr[2] << 24 | addr[3] << 16 | | ||
2209 | addr[4] << 8 | addr[5], | ||
2210 | UMAC_MDF_ADDR + ((*i + 1) * 4)); | ||
2211 | reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL); | ||
2212 | reg |= (1 << (MAX_MC_COUNT - *mc)); | ||
2213 | bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); | ||
2214 | *i += 2; | ||
2215 | (*mc)++; | ||
2216 | } | ||
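
The helper advances *i by two because each filter entry spans two UMAC_MDF_ADDR words (byte offsets 8*n and 8*n + 4 for entry n), and entry n is armed via bit (MAX_MC_COUNT - n) of UMAC_MDF_CTRL. A sketch of the enable-bit selection with worked values (the function is illustrative, not part of the driver):

static u32 mdf_enable_bit(int entry)	/* entry counts from 0 */
{
	/* entry 0 (broadcast)    -> bit 16
	 * entry 1 (own address)  -> bit 15
	 * entry 15 (last slot)   -> bit 1
	 */
	return 1u << (MAX_MC_COUNT - entry);
}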
2217 | |||
2218 | static void bcmgenet_set_rx_mode(struct net_device *dev) | ||
2219 | { | ||
2220 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
2221 | struct netdev_hw_addr *ha; | ||
2222 | int i, mc; | ||
2223 | u32 reg; | ||
2224 | |||
2225 | netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); | ||
2226 | |||
2227 | /* Promiscuous mode */ | ||
2228 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | ||
2229 | if (dev->flags & IFF_PROMISC) { | ||
2230 | reg |= CMD_PROMISC; | ||
2231 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
2232 | bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); | ||
2233 | return; | ||
2234 | } else { | ||
2235 | reg &= ~CMD_PROMISC; | ||
2236 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | ||
2237 | } | ||
2238 | |||
2239 | /* UniMAC doesn't support ALLMULTI */ | ||
2240 | if (dev->flags & IFF_ALLMULTI) { | ||
2241 | netdev_warn(dev, "ALLMULTI is not supported\n"); | ||
2242 | return; | ||
2243 | } | ||
2244 | |||
2245 | /* update MDF filter */ | ||
2246 | i = 0; | ||
2247 | mc = 0; | ||
2248 | /* Broadcast */ | ||
2249 | bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc); | ||
2250 | /* our own address */ | ||
2251 | bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc); | ||
2252 | /* Unicast list */ | ||
2253 | if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc)) | ||
2254 | return; | ||
2255 | |||
2256 | if (!netdev_uc_empty(dev)) | ||
2257 | netdev_for_each_uc_addr(ha, dev) | ||
2258 | bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); | ||
2259 | /* Multicast */ | ||
2260 | if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc)) | ||
2261 | return; | ||
2262 | |||
2263 | netdev_for_each_mc_addr(ha, dev) | ||
2264 | bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); | ||
2265 | } | ||
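
The slot accounting implied above: MAX_MC_COUNT (16) MDF entries in total, of which broadcast and the device address always take two, leaving 14 to be shared by the unicast and multicast lists; a list that would overflow that budget leaves the MDF untouched.

	/* budget sketch: MAX_MC_COUNT = 16 slots
	 *   - 1 broadcast entry
	 *   - 1 device address entry
	 *   = 14 slots left for netdev_uc_count() + netdev_mc_count()
	 */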
2266 | |||
2267 | /* Set the hardware MAC address. */ | ||
2268 | static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) | ||
2269 | { | ||
2270 | struct sockaddr *addr = p; | ||
2271 | |||
2272 | /* Setting the MAC address at the hardware level is not possible | ||
2273 | * without disabling the UniMAC RX/TX enable bits. | ||
2274 | */ | ||
2275 | if (netif_running(dev)) | ||
2276 | return -EBUSY; | ||
2277 | |||
2278 | ether_addr_copy(dev->dev_addr, addr->sa_data); | ||
2279 | |||
2280 | return 0; | ||
2281 | } | ||
2282 | |||
2283 | static u16 bcmgenet_select_queue(struct net_device *dev, | ||
2284 | struct sk_buff *skb, void *accel_priv) | ||
2285 | { | ||
2286 | return netif_is_multiqueue(dev) ? skb->queue_mapping : 0; | ||
2287 | } | ||
2288 | |||
2289 | static const struct net_device_ops bcmgenet_netdev_ops = { | ||
2290 | .ndo_open = bcmgenet_open, | ||
2291 | .ndo_stop = bcmgenet_close, | ||
2292 | .ndo_start_xmit = bcmgenet_xmit, | ||
2293 | .ndo_select_queue = bcmgenet_select_queue, | ||
2294 | .ndo_tx_timeout = bcmgenet_timeout, | ||
2295 | .ndo_set_rx_mode = bcmgenet_set_rx_mode, | ||
2296 | .ndo_set_mac_address = bcmgenet_set_mac_addr, | ||
2297 | .ndo_do_ioctl = bcmgenet_ioctl, | ||
2298 | .ndo_set_features = bcmgenet_set_features, | ||
2299 | }; | ||
2300 | |||
2301 | /* Array of GENET hardware parameters/characteristics */ | ||
2302 | static struct bcmgenet_hw_params bcmgenet_hw_params[] = { | ||
2303 | [GENET_V1] = { | ||
2304 | .tx_queues = 0, | ||
2305 | .rx_queues = 0, | ||
2306 | .bds_cnt = 0, | ||
2307 | .bp_in_en_shift = 16, | ||
2308 | .bp_in_mask = 0xffff, | ||
2309 | .hfb_filter_cnt = 16, | ||
2310 | .qtag_mask = 0x1F, | ||
2311 | .hfb_offset = 0x1000, | ||
2312 | .rdma_offset = 0x2000, | ||
2313 | .tdma_offset = 0x3000, | ||
2314 | .words_per_bd = 2, | ||
2315 | }, | ||
2316 | [GENET_V2] = { | ||
2317 | .tx_queues = 4, | ||
2318 | .rx_queues = 4, | ||
2319 | .bds_cnt = 32, | ||
2320 | .bp_in_en_shift = 16, | ||
2321 | .bp_in_mask = 0xffff, | ||
2322 | .hfb_filter_cnt = 16, | ||
2323 | .qtag_mask = 0x1F, | ||
2324 | .tbuf_offset = 0x0600, | ||
2325 | .hfb_offset = 0x1000, | ||
2326 | .hfb_reg_offset = 0x2000, | ||
2327 | .rdma_offset = 0x3000, | ||
2328 | .tdma_offset = 0x4000, | ||
2329 | .words_per_bd = 2, | ||
2330 | .flags = GENET_HAS_EXT, | ||
2331 | }, | ||
2332 | [GENET_V3] = { | ||
2333 | .tx_queues = 4, | ||
2334 | .rx_queues = 4, | ||
2335 | .bds_cnt = 32, | ||
2336 | .bp_in_en_shift = 17, | ||
2337 | .bp_in_mask = 0x1ffff, | ||
2338 | .hfb_filter_cnt = 48, | ||
2339 | .qtag_mask = 0x3F, | ||
2340 | .tbuf_offset = 0x0600, | ||
2341 | .hfb_offset = 0x8000, | ||
2342 | .hfb_reg_offset = 0xfc00, | ||
2343 | .rdma_offset = 0x10000, | ||
2344 | .tdma_offset = 0x11000, | ||
2345 | .words_per_bd = 2, | ||
2346 | .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR, | ||
2347 | }, | ||
2348 | [GENET_V4] = { | ||
2349 | .tx_queues = 4, | ||
2350 | .rx_queues = 4, | ||
2351 | .bds_cnt = 32, | ||
2352 | .bp_in_en_shift = 17, | ||
2353 | .bp_in_mask = 0x1ffff, | ||
2354 | .hfb_filter_cnt = 48, | ||
2355 | .qtag_mask = 0x3F, | ||
2356 | .tbuf_offset = 0x0600, | ||
2357 | .hfb_offset = 0x8000, | ||
2358 | .hfb_reg_offset = 0xfc00, | ||
2359 | .rdma_offset = 0x2000, | ||
2360 | .tdma_offset = 0x4000, | ||
2361 | .words_per_bd = 3, | ||
2362 | .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR, | ||
2363 | }, | ||
2364 | }; | ||
2365 | |||
2366 | /* Infer hardware parameters from the detected GENET version */ | ||
2367 | static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) | ||
2368 | { | ||
2369 | struct bcmgenet_hw_params *params; | ||
2370 | u32 reg; | ||
2371 | u8 major; | ||
2372 | |||
2373 | if (GENET_IS_V4(priv)) { | ||
2374 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; | ||
2375 | genet_dma_ring_regs = genet_dma_ring_regs_v4; | ||
2376 | priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; | ||
2377 | priv->version = GENET_V4; | ||
2378 | } else if (GENET_IS_V3(priv)) { | ||
2379 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; | ||
2380 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | ||
2381 | priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; | ||
2382 | priv->version = GENET_V3; | ||
2383 | } else if (GENET_IS_V2(priv)) { | ||
2384 | bcmgenet_dma_regs = bcmgenet_dma_regs_v2; | ||
2385 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | ||
2386 | priv->dma_rx_chk_bit = DMA_RX_CHK_V12; | ||
2387 | priv->version = GENET_V2; | ||
2388 | } else if (GENET_IS_V1(priv)) { | ||
2389 | bcmgenet_dma_regs = bcmgenet_dma_regs_v1; | ||
2390 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | ||
2391 | priv->dma_rx_chk_bit = DMA_RX_CHK_V12; | ||
2392 | priv->version = GENET_V1; | ||
2393 | } | ||
2394 | |||
2395 | /* enum genet_version starts at 1 */ | ||
2396 | priv->hw_params = &bcmgenet_hw_params[priv->version]; | ||
2397 | params = priv->hw_params; | ||
2398 | |||
2399 | /* Read GENET HW version */ | ||
2400 | reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); | ||
2401 | major = (reg >> 24) & 0x0f; | ||
2402 | if (major == 5) | ||
2403 | major = 4; | ||
2404 | else if (major == 0) | ||
2405 | major = 1; | ||
2406 | if (major != priv->version) { | ||
2407 | dev_err(&priv->pdev->dev, | ||
2408 | "GENET version mismatch, got: %d, configured for: %d\n", | ||
2409 | major, priv->version); | ||
2410 | } | ||
2411 | |||
2412 | /* Print the GENET core version */ | ||
2413 | dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, | ||
2414 | major, (reg >> 16) & 0x0f, reg & 0xffff); | ||
2415 | |||
2416 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | ||
2417 | if (!(params->flags & GENET_HAS_40BITS)) | ||
2418 | pr_warn("GENET does not support 40-bit PA\n"); | ||
2419 | #endif | ||
2420 | |||
2421 | pr_debug("Configuration for version: %d\n" | ||
2422 | "TXq: %1d, RXq: %1d, BDs: %1d\n" | ||
2423 | "BP << en: %2d, BP msk: 0x%05x\n" | ||
2424 | "HFB count: %2d, QTAQ msk: 0x%05x\n" | ||
2425 | "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" | ||
2426 | "RDMA: 0x%05x, TDMA: 0x%05x\n" | ||
2427 | "Words/BD: %d\n", | ||
2428 | priv->version, | ||
2429 | params->tx_queues, params->rx_queues, params->bds_cnt, | ||
2430 | params->bp_in_en_shift, params->bp_in_mask, | ||
2431 | params->hfb_filter_cnt, params->qtag_mask, | ||
2432 | params->tbuf_offset, params->hfb_offset, | ||
2433 | params->hfb_reg_offset, | ||
2434 | params->rdma_offset, params->tdma_offset, | ||
2435 | params->words_per_bd); | ||
2436 | } | ||
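
A worked decode of SYS_REV_CTRL as read above, for an assumed register value:

	/* reg = 0x05010002 (illustrative):
	 *   major = (reg >> 24) & 0x0f = 5, remapped to 4 (GENET v4)
	 *   minor = (reg >> 16) & 0x0f = 1
	 *   low   =  reg & 0xffff = 0x0002 (third GENET_VER_FMT field)
	 * major 0 is likewise remapped to 1 (GENET v1).
	 */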
2437 | |||
2438 | static const struct of_device_id bcmgenet_match[] = { | ||
2439 | { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 }, | ||
2440 | { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 }, | ||
2441 | { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 }, | ||
2442 | { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, | ||
2443 | { }, | ||
2444 | }; | ||
2445 | |||
2446 | static int bcmgenet_probe(struct platform_device *pdev) | ||
2447 | { | ||
2448 | struct device_node *dn = pdev->dev.of_node; | ||
2449 | const struct of_device_id *of_id; | ||
2450 | struct bcmgenet_priv *priv; | ||
2451 | struct net_device *dev; | ||
2452 | const void *macaddr; | ||
2453 | struct resource *r; | ||
2454 | int err = -EIO; | ||
2455 | |||
2456 | /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */ | ||
2457 | dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1); | ||
2458 | if (!dev) { | ||
2459 | dev_err(&pdev->dev, "can't allocate net device\n"); | ||
2460 | return -ENOMEM; | ||
2461 | } | ||
2462 | |||
2463 | of_id = of_match_node(bcmgenet_match, dn); | ||
2464 | err = -EINVAL; | ||
2465 | if (!of_id) | ||
2466 | goto err; /* free the netdev instead of leaking it */ | ||
2467 | priv = netdev_priv(dev); | ||
2468 | priv->irq0 = platform_get_irq(pdev, 0); | ||
2469 | priv->irq1 = platform_get_irq(pdev, 1); | ||
2470 | if (priv->irq0 <= 0 || priv->irq1 <= 0) { | ||
2471 | dev_err(&pdev->dev, "can't find IRQs\n"); | ||
2472 | err = -EINVAL; | ||
2473 | goto err; | ||
2474 | } | ||
2475 | |||
2476 | macaddr = of_get_mac_address(dn); | ||
2477 | if (!macaddr) { | ||
2478 | dev_err(&pdev->dev, "can't find MAC address\n"); | ||
2479 | err = -EINVAL; | ||
2480 | goto err; | ||
2481 | } | ||
2482 | |||
2483 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
2484 | priv->base = devm_request_and_ioremap(&pdev->dev, r); | ||
2485 | if (!priv->base) { | ||
2486 | dev_err(&pdev->dev, "can't ioremap\n"); | ||
2487 | err = -EINVAL; | ||
2488 | goto err; | ||
2489 | } | ||
2490 | |||
2491 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
2492 | dev_set_drvdata(&pdev->dev, dev); | ||
2493 | ether_addr_copy(dev->dev_addr, macaddr); | ||
2494 | dev->watchdog_timeo = 2 * HZ; | ||
2495 | SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops); | ||
2496 | dev->netdev_ops = &bcmgenet_netdev_ops; | ||
2497 | netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64); | ||
2498 | |||
2499 | priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); | ||
2500 | |||
2501 | /* Set hardware features */ | ||
2502 | dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | | ||
2503 | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; | ||
2504 | |||
2505 | /* Set the needed headroom to account for any possible | ||
2506 | * features enabling/disabling at runtime | ||
2507 | */ | ||
2508 | dev->needed_headroom += 64; | ||
2509 | |||
2510 | netdev_boot_setup_check(dev); | ||
2511 | |||
2512 | priv->dev = dev; | ||
2513 | priv->pdev = pdev; | ||
2514 | priv->version = (enum bcmgenet_version)of_id->data; | ||
2515 | |||
2516 | bcmgenet_set_hw_params(priv); | ||
2517 | |||
2518 | spin_lock_init(&priv->lock); | ||
2519 | /* Mii wait queue */ | ||
2520 | init_waitqueue_head(&priv->wq); | ||
2521 | /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ | ||
2522 | priv->rx_buf_len = RX_BUF_LENGTH; | ||
2523 | INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); | ||
2524 | |||
2525 | priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); | ||
2526 | if (IS_ERR(priv->clk)) | ||
2527 | dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); | ||
2528 | |||
2529 | priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); | ||
2530 | if (IS_ERR(priv->clk_wol)) | ||
2531 | dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); | ||
2532 | |||
2533 | if (!IS_ERR(priv->clk)) | ||
2534 | clk_prepare_enable(priv->clk); | ||
2535 | |||
2536 | err = reset_umac(priv); | ||
2537 | if (err) | ||
2538 | goto err_clk_disable; | ||
2539 | |||
2540 | err = bcmgenet_mii_init(dev); | ||
2541 | if (err) | ||
2542 | goto err_clk_disable; | ||
2543 | |||
2544 | /* set up the number of real queues + 1 (GENET_V1 has 0 hardware | ||
2545 | * queues: just the ring 16 descriptor-based TX queue) | ||
2546 | */ | ||
2547 | netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); | ||
2548 | netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); | ||
2549 | |||
2550 | err = register_netdev(dev); | ||
2551 | if (err) | ||
2552 | goto err_clk_disable; | ||
2553 | |||
2554 | /* Turn off the main clock, WOL clock is handled separately */ | ||
2555 | if (!IS_ERR(priv->clk)) | ||
2556 | clk_disable_unprepare(priv->clk); | ||
2557 | |||
2558 | return err; | ||
2559 | |||
2560 | err_clk_disable: | ||
2561 | if (!IS_ERR(priv->clk)) | ||
2562 | clk_disable_unprepare(priv->clk); | ||
2563 | err: | ||
2564 | free_netdev(dev); | ||
2565 | return err; | ||
2566 | } | ||
2567 | |||
2568 | static int bcmgenet_remove(struct platform_device *pdev) | ||
2569 | { | ||
2570 | struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); | ||
2571 | |||
2572 | dev_set_drvdata(&pdev->dev, NULL); | ||
2573 | unregister_netdev(priv->dev); | ||
2574 | bcmgenet_mii_exit(priv->dev); | ||
2575 | free_netdev(priv->dev); | ||
2576 | |||
2577 | return 0; | ||
2578 | } | ||
2579 | |||
2580 | |||
2581 | static struct platform_driver bcmgenet_driver = { | ||
2582 | .probe = bcmgenet_probe, | ||
2583 | .remove = bcmgenet_remove, | ||
2584 | .driver = { | ||
2585 | .name = "bcmgenet", | ||
2586 | .owner = THIS_MODULE, | ||
2587 | .of_match_table = bcmgenet_match, | ||
2588 | }, | ||
2589 | }; | ||
2590 | module_platform_driver(bcmgenet_driver); | ||
2591 | |||
2592 | MODULE_AUTHOR("Broadcom Corporation"); | ||
2593 | MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver"); | ||
2594 | MODULE_ALIAS("platform:bcmgenet"); | ||
2595 | MODULE_LICENSE("GPL"); | ||