aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Walleij <linus.walleij@linaro.org>2018-01-12 16:34:24 -0500
committerDavid S. Miller <davem@davemloft.net>2018-01-15 14:38:55 -0500
commit4d5ae32f5e1e13f7f36d6439ec3257993b9f5b88 (patch)
treef1ad3ac842a08eaa2ec8c6f063eff7b0b27fcf14
parent1c2f11466b8b12d2487bd79ac203086c8ce60e1d (diff)
net: ethernet: Add a driver for Gemini gigabit ethernet
The Gemini ethernet has been around for years as an out-of-tree patch used with the NAS boxen and routers built on StorLink SL3512 and SL3516, later Storm Semiconductor, later Cortina Systems. These ASICs are still being deployed and brand new off-the-shelf systems using it can easily be acquired. The full name of the IP block is "Net Engine and Gigabit Ethernet MAC" commonly just called "GMAC". The hardware block contains a common TCP Offload Engine (TOE) that can be used by both MACs. The current driver does not use it. Cc: Tobias Waldvogel <tobias.waldvogel@gmail.com> Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl> Signed-off-by: Linus Walleij <linus.walleij@linaro.org> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--MAINTAINERS2
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/cortina/Kconfig22
-rw-r--r--drivers/net/ethernet/cortina/Makefile4
-rw-r--r--drivers/net/ethernet/cortina/gemini.c2593
-rw-r--r--drivers/net/ethernet/cortina/gemini.h958
7 files changed, 3581 insertions, 0 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index e22ca0ae995d..9e0fd33998ac 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1327,8 +1327,10 @@ T: git git://github.com/ulli-kroll/linux.git
1327S: Maintained 1327S: Maintained
1328F: Documentation/devicetree/bindings/arm/gemini.txt 1328F: Documentation/devicetree/bindings/arm/gemini.txt
1329F: Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt 1329F: Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
1330F: Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt
1330F: Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt 1331F: Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
1331F: arch/arm/mach-gemini/ 1332F: arch/arm/mach-gemini/
1333F: drivers/net/ethernet/cortina/gemini/*
1332F: drivers/pinctrl/pinctrl-gemini.c 1334F: drivers/pinctrl/pinctrl-gemini.c
1333F: drivers/rtc/rtc-ftrtc010.c 1335F: drivers/rtc/rtc-ftrtc010.c
1334 1336
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index d50519ed7549..b6cf4b6962f5 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -42,6 +42,7 @@ source "drivers/net/ethernet/cavium/Kconfig"
42source "drivers/net/ethernet/chelsio/Kconfig" 42source "drivers/net/ethernet/chelsio/Kconfig"
43source "drivers/net/ethernet/cirrus/Kconfig" 43source "drivers/net/ethernet/cirrus/Kconfig"
44source "drivers/net/ethernet/cisco/Kconfig" 44source "drivers/net/ethernet/cisco/Kconfig"
45source "drivers/net/ethernet/cortina/Kconfig"
45 46
46config CX_ECAT 47config CX_ECAT
47 tristate "Beckhoff CX5020 EtherCAT master support" 48 tristate "Beckhoff CX5020 EtherCAT master support"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 6cf5aded9423..3cdf01e96e0b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
29obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ 29obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
30obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ 30obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
31obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ 31obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
32obj-$(CONFIG_NET_VENDOR_CORTINA) += cortina/
32obj-$(CONFIG_CX_ECAT) += ec_bhf.o 33obj-$(CONFIG_CX_ECAT) += ec_bhf.o
33obj-$(CONFIG_DM9000) += davicom/ 34obj-$(CONFIG_DM9000) += davicom/
34obj-$(CONFIG_DNET) += dnet.o 35obj-$(CONFIG_DNET) += dnet.o
diff --git a/drivers/net/ethernet/cortina/Kconfig b/drivers/net/ethernet/cortina/Kconfig
new file mode 100644
index 000000000000..0df743ea51f1
--- /dev/null
+++ b/drivers/net/ethernet/cortina/Kconfig
@@ -0,0 +1,22 @@
1# SPDX-License-Identifier: GPL-2.0
2# Cortina ethernet devices
3
4config NET_VENDOR_CORTINA
5 bool "Cortina Gemini devices"
6 default y
7 ---help---
8 If you have a network (Ethernet) card belonging to this class, say Y
9 and read the Ethernet-HOWTO, available from
10 <http://www.tldp.org/docs.html#howto>.
11
12if NET_VENDOR_CORTINA
13
14config GEMINI_ETHERNET
15 tristate "Gemini Gigabit Ethernet support"
16 depends on OF
17 select PHYLIB
18 select CRC32
19 ---help---
20 This driver supports StorLink SL351x (Gemini) dual Gigabit Ethernet.
21
22endif # NET_VENDOR_CORTINA
diff --git a/drivers/net/ethernet/cortina/Makefile b/drivers/net/ethernet/cortina/Makefile
new file mode 100644
index 000000000000..4e86d398a89c
--- /dev/null
+++ b/drivers/net/ethernet/cortina/Makefile
@@ -0,0 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
2# Makefile for the Cortina Gemini network device drivers.
3
4obj-$(CONFIG_GEMINI_ETHERNET) += gemini.o
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
new file mode 100644
index 000000000000..5eb999af2c40
--- /dev/null
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -0,0 +1,2593 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Ethernet device driver for Cortina Systems Gemini SoC
3 * Also known as the StorLink SL3512 and SL3516 (SL351x) or Lepus
4 * Net Engine and Gigabit Ethernet MAC (GMAC)
5 * This hardware contains a TCP Offload Engine (TOE) but currently the
6 * driver does not make use of it.
7 *
8 * Authors:
9 * Linus Walleij <linus.walleij@linaro.org>
10 * Tobias Waldvogel <tobias.waldvogel@gmail.com> (OpenWRT)
11 * Michał Mirosław <mirq-linux@rere.qmqm.pl>
12 * Paulius Zaleckas <paulius.zaleckas@gmail.com>
13 * Giuseppe De Robertis <Giuseppe.DeRobertis@ba.infn.it>
14 * Gary Chen & Ch Hsu Storlink Semiconductor
15 */
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/spinlock.h>
21#include <linux/slab.h>
22#include <linux/dma-mapping.h>
23#include <linux/cache.h>
24#include <linux/interrupt.h>
25#include <linux/reset.h>
26#include <linux/clk.h>
27#include <linux/of.h>
28#include <linux/of_mdio.h>
29#include <linux/of_net.h>
30#include <linux/of_platform.h>
31#include <linux/etherdevice.h>
32#include <linux/if_vlan.h>
33#include <linux/skbuff.h>
34#include <linux/phy.h>
35#include <linux/crc32.h>
36#include <linux/ethtool.h>
37#include <linux/tcp.h>
38#include <linux/u64_stats_sync.h>
39
40#include <linux/in.h>
41#include <linux/ip.h>
42#include <linux/ipv6.h>
43
44#include "gemini.h"
45
46#define DRV_NAME "gmac-gemini"
47#define DRV_VERSION "1.0"
48
49#define HSIZE_8 0x00
50#define HSIZE_16 0x01
51#define HSIZE_32 0x02
52
53#define HBURST_SINGLE 0x00
54#define HBURST_INCR 0x01
55#define HBURST_INCR4 0x02
56#define HBURST_INCR8 0x03
57
58#define HPROT_DATA_CACHE BIT(0)
59#define HPROT_PRIVILIGED BIT(1)
60#define HPROT_BUFFERABLE BIT(2)
61#define HPROT_CACHABLE BIT(3)
62
63#define DEFAULT_RX_COALESCE_NSECS 0
64#define DEFAULT_GMAC_RXQ_ORDER 9
65#define DEFAULT_GMAC_TXQ_ORDER 8
66#define DEFAULT_RX_BUF_ORDER 11
67#define DEFAULT_NAPI_WEIGHT 64
68#define TX_MAX_FRAGS 16
69#define TX_QUEUE_NUM 1 /* max: 6 */
70#define RX_MAX_ALLOC_ORDER 2
71
72#define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT | \
73 GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT)
74#define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT | \
75 GMAC0_SWTQ00_FIN_INT_BIT)
76#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
77
78#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
79 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
80 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
81
82/**
83 * struct gmac_queue_page - page buffer per-page info
84 */
85struct gmac_queue_page {
86 struct page *page;
87 dma_addr_t mapping;
88};
89
90struct gmac_txq {
91 struct gmac_txdesc *ring;
92 struct sk_buff **skb;
93 unsigned int cptr;
94 unsigned int noirq_packets;
95};
96
97struct gemini_ethernet;
98
99struct gemini_ethernet_port {
100 u8 id; /* 0 or 1 */
101
102 struct gemini_ethernet *geth;
103 struct net_device *netdev;
104 struct device *dev;
105 void __iomem *dma_base;
106 void __iomem *gmac_base;
107 struct clk *pclk;
108 struct reset_control *reset;
109 int irq;
110 __le32 mac_addr[3];
111
112 void __iomem *rxq_rwptr;
113 struct gmac_rxdesc *rxq_ring;
114 unsigned int rxq_order;
115
116 struct napi_struct napi;
117 struct hrtimer rx_coalesce_timer;
118 unsigned int rx_coalesce_nsecs;
119 unsigned int freeq_refill;
120 struct gmac_txq txq[TX_QUEUE_NUM];
121 unsigned int txq_order;
122 unsigned int irq_every_tx_packets;
123
124 dma_addr_t rxq_dma_base;
125 dma_addr_t txq_dma_base;
126
127 unsigned int msg_enable;
128 spinlock_t config_lock; /* Locks config register */
129
130 struct u64_stats_sync tx_stats_syncp;
131 struct u64_stats_sync rx_stats_syncp;
132 struct u64_stats_sync ir_stats_syncp;
133
134 struct rtnl_link_stats64 stats;
135 u64 hw_stats[RX_STATS_NUM];
136 u64 rx_stats[RX_STATUS_NUM];
137 u64 rx_csum_stats[RX_CHKSUM_NUM];
138 u64 rx_napi_exits;
139 u64 tx_frag_stats[TX_MAX_FRAGS];
140 u64 tx_frags_linearized;
141 u64 tx_hw_csummed;
142};
143
144struct gemini_ethernet {
145 struct device *dev;
146 void __iomem *base;
147 struct gemini_ethernet_port *port0;
148 struct gemini_ethernet_port *port1;
149
150 spinlock_t irq_lock; /* Locks IRQ-related registers */
151 unsigned int freeq_order;
152 unsigned int freeq_frag_order;
153 struct gmac_rxdesc *freeq_ring;
154 dma_addr_t freeq_dma_base;
155 struct gmac_queue_page *freeq_pages;
156 unsigned int num_freeq_pages;
157 spinlock_t freeq_lock; /* Locks queue from reentrance */
158};
159
160#define GMAC_STATS_NUM ( \
161 RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \
162 TX_MAX_FRAGS + 2)
163
164static const char gmac_stats_strings[GMAC_STATS_NUM][ETH_GSTRING_LEN] = {
165 "GMAC_IN_DISCARDS",
166 "GMAC_IN_ERRORS",
167 "GMAC_IN_MCAST",
168 "GMAC_IN_BCAST",
169 "GMAC_IN_MAC1",
170 "GMAC_IN_MAC2",
171 "RX_STATUS_GOOD_FRAME",
172 "RX_STATUS_TOO_LONG_GOOD_CRC",
173 "RX_STATUS_RUNT_FRAME",
174 "RX_STATUS_SFD_NOT_FOUND",
175 "RX_STATUS_CRC_ERROR",
176 "RX_STATUS_TOO_LONG_BAD_CRC",
177 "RX_STATUS_ALIGNMENT_ERROR",
178 "RX_STATUS_TOO_LONG_BAD_ALIGN",
179 "RX_STATUS_RX_ERR",
180 "RX_STATUS_DA_FILTERED",
181 "RX_STATUS_BUFFER_FULL",
182 "RX_STATUS_11",
183 "RX_STATUS_12",
184 "RX_STATUS_13",
185 "RX_STATUS_14",
186 "RX_STATUS_15",
187 "RX_CHKSUM_IP_UDP_TCP_OK",
188 "RX_CHKSUM_IP_OK_ONLY",
189 "RX_CHKSUM_NONE",
190 "RX_CHKSUM_3",
191 "RX_CHKSUM_IP_ERR_UNKNOWN",
192 "RX_CHKSUM_IP_ERR",
193 "RX_CHKSUM_TCP_UDP_ERR",
194 "RX_CHKSUM_7",
195 "RX_NAPI_EXITS",
196 "TX_FRAGS[1]",
197 "TX_FRAGS[2]",
198 "TX_FRAGS[3]",
199 "TX_FRAGS[4]",
200 "TX_FRAGS[5]",
201 "TX_FRAGS[6]",
202 "TX_FRAGS[7]",
203 "TX_FRAGS[8]",
204 "TX_FRAGS[9]",
205 "TX_FRAGS[10]",
206 "TX_FRAGS[11]",
207 "TX_FRAGS[12]",
208 "TX_FRAGS[13]",
209 "TX_FRAGS[14]",
210 "TX_FRAGS[15]",
211 "TX_FRAGS[16+]",
212 "TX_FRAGS_LINEARIZED",
213 "TX_HW_CSUMMED",
214};
215
216static void gmac_dump_dma_state(struct net_device *netdev);
217
218static void gmac_update_config0_reg(struct net_device *netdev,
219 u32 val, u32 vmask)
220{
221 struct gemini_ethernet_port *port = netdev_priv(netdev);
222 unsigned long flags;
223 u32 reg;
224
225 spin_lock_irqsave(&port->config_lock, flags);
226
227 reg = readl(port->gmac_base + GMAC_CONFIG0);
228 reg = (reg & ~vmask) | val;
229 writel(reg, port->gmac_base + GMAC_CONFIG0);
230
231 spin_unlock_irqrestore(&port->config_lock, flags);
232}
233
234static void gmac_enable_tx_rx(struct net_device *netdev)
235{
236 struct gemini_ethernet_port *port = netdev_priv(netdev);
237 unsigned long flags;
238 u32 reg;
239
240 spin_lock_irqsave(&port->config_lock, flags);
241
242 reg = readl(port->gmac_base + GMAC_CONFIG0);
243 reg &= ~CONFIG0_TX_RX_DISABLE;
244 writel(reg, port->gmac_base + GMAC_CONFIG0);
245
246 spin_unlock_irqrestore(&port->config_lock, flags);
247}
248
249static void gmac_disable_tx_rx(struct net_device *netdev)
250{
251 struct gemini_ethernet_port *port = netdev_priv(netdev);
252 unsigned long flags;
253 u32 val;
254
255 spin_lock_irqsave(&port->config_lock, flags);
256
257 val = readl(port->gmac_base + GMAC_CONFIG0);
258 val |= CONFIG0_TX_RX_DISABLE;
259 writel(val, port->gmac_base + GMAC_CONFIG0);
260
261 spin_unlock_irqrestore(&port->config_lock, flags);
262
263 mdelay(10); /* let GMAC consume packet */
264}
265
266static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx)
267{
268 struct gemini_ethernet_port *port = netdev_priv(netdev);
269 unsigned long flags;
270 u32 val;
271
272 spin_lock_irqsave(&port->config_lock, flags);
273
274 val = readl(port->gmac_base + GMAC_CONFIG0);
275 val &= ~CONFIG0_FLOW_CTL;
276 if (tx)
277 val |= CONFIG0_FLOW_TX;
278 if (rx)
279 val |= CONFIG0_FLOW_RX;
280 writel(val, port->gmac_base + GMAC_CONFIG0);
281
282 spin_unlock_irqrestore(&port->config_lock, flags);
283}
284
285static void gmac_speed_set(struct net_device *netdev)
286{
287 struct gemini_ethernet_port *port = netdev_priv(netdev);
288 struct phy_device *phydev = netdev->phydev;
289 union gmac_status status, old_status;
290 int pause_tx = 0;
291 int pause_rx = 0;
292
293 status.bits32 = readl(port->gmac_base + GMAC_STATUS);
294 old_status.bits32 = status.bits32;
295 status.bits.link = phydev->link;
296 status.bits.duplex = phydev->duplex;
297
298 switch (phydev->speed) {
299 case 1000:
300 status.bits.speed = GMAC_SPEED_1000;
301 if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
302 status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
303 netdev_info(netdev, "connect to RGMII @ 1Gbit\n");
304 break;
305 case 100:
306 status.bits.speed = GMAC_SPEED_100;
307 if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
308 status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
309 netdev_info(netdev, "connect to RGMII @ 100 Mbit\n");
310 break;
311 case 10:
312 status.bits.speed = GMAC_SPEED_10;
313 if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
314 status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
315 netdev_info(netdev, "connect to RGMII @ 10 Mbit\n");
316 break;
317 default:
318 netdev_warn(netdev, "Not supported PHY speed (%d)\n",
319 phydev->speed);
320 }
321
322 if (phydev->duplex == DUPLEX_FULL) {
323 u16 lcladv = phy_read(phydev, MII_ADVERTISE);
324 u16 rmtadv = phy_read(phydev, MII_LPA);
325 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
326
327 if (cap & FLOW_CTRL_RX)
328 pause_rx = 1;
329 if (cap & FLOW_CTRL_TX)
330 pause_tx = 1;
331 }
332
333 gmac_set_flow_control(netdev, pause_tx, pause_rx);
334
335 if (old_status.bits32 == status.bits32)
336 return;
337
338 if (netif_msg_link(port)) {
339 phy_print_status(phydev);
340 netdev_info(netdev, "link flow control: %s\n",
341 phydev->pause
342 ? (phydev->asym_pause ? "tx" : "both")
343 : (phydev->asym_pause ? "rx" : "none")
344 );
345 }
346
347 gmac_disable_tx_rx(netdev);
348 writel(status.bits32, port->gmac_base + GMAC_STATUS);
349 gmac_enable_tx_rx(netdev);
350}
351
352static int gmac_setup_phy(struct net_device *netdev)
353{
354 struct gemini_ethernet_port *port = netdev_priv(netdev);
355 union gmac_status status = { .bits32 = 0 };
356 struct device *dev = port->dev;
357 struct phy_device *phy;
358
359 phy = of_phy_get_and_connect(netdev,
360 dev->of_node,
361 gmac_speed_set);
362 if (!phy)
363 return -ENODEV;
364 netdev->phydev = phy;
365
366 netdev_info(netdev, "connected to PHY \"%s\"\n",
367 phydev_name(phy));
368 phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
369 (unsigned long)phy->phy_id,
370 phy_modes(phy->interface));
371
372 phy->supported &= PHY_GBIT_FEATURES;
373 phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
374 phy->advertising = phy->supported;
375
376 /* set PHY interface type */
377 switch (phy->interface) {
378 case PHY_INTERFACE_MODE_MII:
379 netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
380 status.bits.mii_rmii = GMAC_PHY_MII;
381 netdev_info(netdev, "connect to MII\n");
382 break;
383 case PHY_INTERFACE_MODE_GMII:
384 netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
385 status.bits.mii_rmii = GMAC_PHY_GMII;
386 netdev_info(netdev, "connect to GMII\n");
387 break;
388 case PHY_INTERFACE_MODE_RGMII:
389 dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n");
390 status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
391 netdev_info(netdev, "connect to RGMII\n");
392 break;
393 default:
394 netdev_err(netdev, "Unsupported MII interface\n");
395 phy_disconnect(phy);
396 netdev->phydev = NULL;
397 return -EINVAL;
398 }
399 writel(status.bits32, port->gmac_base + GMAC_STATUS);
400
401 return 0;
402}
403
404static int gmac_pick_rx_max_len(int max_l3_len)
405{
406 /* index = CONFIG_MAXLEN_XXX values */
407 static const int max_len[8] = {
408 1536, 1518, 1522, 1542,
409 9212, 10236, 1518, 1518
410 };
411 int i, n = 5;
412
413 max_l3_len += ETH_HLEN + VLAN_HLEN;
414
415 if (max_l3_len > max_len[n])
416 return -1;
417
418 for (i = 0; i < 5; i++) {
419 if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
420 n = i;
421 }
422
423 return n;
424}
425
426static int gmac_init(struct net_device *netdev)
427{
428 struct gemini_ethernet_port *port = netdev_priv(netdev);
429 union gmac_config0 config0 = { .bits = {
430 .dis_tx = 1,
431 .dis_rx = 1,
432 .ipv4_rx_chksum = 1,
433 .ipv6_rx_chksum = 1,
434 .rx_err_detect = 1,
435 .rgmm_edge = 1,
436 .port0_chk_hwq = 1,
437 .port1_chk_hwq = 1,
438 .port0_chk_toeq = 1,
439 .port1_chk_toeq = 1,
440 .port0_chk_classq = 1,
441 .port1_chk_classq = 1,
442 } };
443 union gmac_ahb_weight ahb_weight = { .bits = {
444 .rx_weight = 1,
445 .tx_weight = 1,
446 .hash_weight = 1,
447 .pre_req = 0x1f,
448 .tq_dv_threshold = 0,
449 } };
450 union gmac_tx_wcr0 hw_weigh = { .bits = {
451 .hw_tq3 = 1,
452 .hw_tq2 = 1,
453 .hw_tq1 = 1,
454 .hw_tq0 = 1,
455 } };
456 union gmac_tx_wcr1 sw_weigh = { .bits = {
457 .sw_tq5 = 1,
458 .sw_tq4 = 1,
459 .sw_tq3 = 1,
460 .sw_tq2 = 1,
461 .sw_tq1 = 1,
462 .sw_tq0 = 1,
463 } };
464 union gmac_config1 config1 = { .bits = {
465 .set_threshold = 16,
466 .rel_threshold = 24,
467 } };
468 union gmac_config2 config2 = { .bits = {
469 .set_threshold = 16,
470 .rel_threshold = 32,
471 } };
472 union gmac_config3 config3 = { .bits = {
473 .set_threshold = 0,
474 .rel_threshold = 0,
475 } };
476 union gmac_config0 tmp;
477 u32 val;
478
479 config0.bits.max_len = gmac_pick_rx_max_len(netdev->mtu);
480 tmp.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
481 config0.bits.reserved = tmp.bits.reserved;
482 writel(config0.bits32, port->gmac_base + GMAC_CONFIG0);
483 writel(config1.bits32, port->gmac_base + GMAC_CONFIG1);
484 writel(config2.bits32, port->gmac_base + GMAC_CONFIG2);
485 writel(config3.bits32, port->gmac_base + GMAC_CONFIG3);
486
487 val = readl(port->dma_base + GMAC_AHB_WEIGHT_REG);
488 writel(ahb_weight.bits32, port->dma_base + GMAC_AHB_WEIGHT_REG);
489
490 writel(hw_weigh.bits32,
491 port->dma_base + GMAC_TX_WEIGHTING_CTRL_0_REG);
492 writel(sw_weigh.bits32,
493 port->dma_base + GMAC_TX_WEIGHTING_CTRL_1_REG);
494
495 port->rxq_order = DEFAULT_GMAC_RXQ_ORDER;
496 port->txq_order = DEFAULT_GMAC_TXQ_ORDER;
497 port->rx_coalesce_nsecs = DEFAULT_RX_COALESCE_NSECS;
498
499 /* Mark every quarter of the queue a packet for interrupt
500 * in order to be able to wake up the queue if it was stopped
501 */
502 port->irq_every_tx_packets = 1 << (port->txq_order - 2);
503
504 return 0;
505}
506
507static void gmac_uninit(struct net_device *netdev)
508{
509 if (netdev->phydev)
510 phy_disconnect(netdev->phydev);
511}
512
513static int gmac_setup_txqs(struct net_device *netdev)
514{
515 struct gemini_ethernet_port *port = netdev_priv(netdev);
516 unsigned int n_txq = netdev->num_tx_queues;
517 struct gemini_ethernet *geth = port->geth;
518 size_t entries = 1 << port->txq_order;
519 struct gmac_txq *txq = port->txq;
520 struct gmac_txdesc *desc_ring;
521 size_t len = n_txq * entries;
522 struct sk_buff **skb_tab;
523 void __iomem *rwptr_reg;
524 unsigned int r;
525 int i;
526
527 rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
528
529 skb_tab = kcalloc(len, sizeof(*skb_tab), GFP_KERNEL);
530 if (!skb_tab)
531 return -ENOMEM;
532
533 desc_ring = dma_alloc_coherent(geth->dev, len * sizeof(*desc_ring),
534 &port->txq_dma_base, GFP_KERNEL);
535
536 if (!desc_ring) {
537 kfree(skb_tab);
538 return -ENOMEM;
539 }
540
541 if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
542 dev_warn(geth->dev, "TX queue base it not aligned\n");
543 return -ENOMEM;
544 }
545
546 writel(port->txq_dma_base | port->txq_order,
547 port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG);
548
549 for (i = 0; i < n_txq; i++) {
550 txq->ring = desc_ring;
551 txq->skb = skb_tab;
552 txq->noirq_packets = 0;
553
554 r = readw(rwptr_reg);
555 rwptr_reg += 2;
556 writew(r, rwptr_reg);
557 rwptr_reg += 2;
558 txq->cptr = r;
559
560 txq++;
561 desc_ring += entries;
562 skb_tab += entries;
563 }
564
565 return 0;
566}
567
568static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
569 unsigned int r)
570{
571 struct gemini_ethernet_port *port = netdev_priv(netdev);
572 unsigned int m = (1 << port->txq_order) - 1;
573 struct gemini_ethernet *geth = port->geth;
574 unsigned int c = txq->cptr;
575 union gmac_txdesc_0 word0;
576 union gmac_txdesc_1 word1;
577 unsigned int hwchksum = 0;
578 unsigned long bytes = 0;
579 struct gmac_txdesc *txd;
580 unsigned short nfrags;
581 unsigned int errs = 0;
582 unsigned int pkts = 0;
583 unsigned int word3;
584 dma_addr_t mapping;
585
586 if (c == r)
587 return;
588
589 while (c != r) {
590 txd = txq->ring + c;
591 word0 = txd->word0;
592 word1 = txd->word1;
593 mapping = txd->word2.buf_adr;
594 word3 = txd->word3.bits32;
595
596 dma_unmap_single(geth->dev, mapping,
597 word0.bits.buffer_size, DMA_TO_DEVICE);
598
599 if (word3 & EOF_BIT)
600 dev_kfree_skb(txq->skb[c]);
601
602 c++;
603 c &= m;
604
605 if (!(word3 & SOF_BIT))
606 continue;
607
608 if (!word0.bits.status_tx_ok) {
609 errs++;
610 continue;
611 }
612
613 pkts++;
614 bytes += txd->word1.bits.byte_count;
615
616 if (word1.bits32 & TSS_CHECKUM_ENABLE)
617 hwchksum++;
618
619 nfrags = word0.bits.desc_count - 1;
620 if (nfrags) {
621 if (nfrags >= TX_MAX_FRAGS)
622 nfrags = TX_MAX_FRAGS - 1;
623
624 u64_stats_update_begin(&port->tx_stats_syncp);
625 port->tx_frag_stats[nfrags]++;
626 u64_stats_update_end(&port->ir_stats_syncp);
627 }
628 }
629
630 u64_stats_update_begin(&port->ir_stats_syncp);
631 port->stats.tx_errors += errs;
632 port->stats.tx_packets += pkts;
633 port->stats.tx_bytes += bytes;
634 port->tx_hw_csummed += hwchksum;
635 u64_stats_update_end(&port->ir_stats_syncp);
636
637 txq->cptr = c;
638}
639
640static void gmac_cleanup_txqs(struct net_device *netdev)
641{
642 struct gemini_ethernet_port *port = netdev_priv(netdev);
643 unsigned int n_txq = netdev->num_tx_queues;
644 struct gemini_ethernet *geth = port->geth;
645 void __iomem *rwptr_reg;
646 unsigned int r, i;
647
648 rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;
649
650 for (i = 0; i < n_txq; i++) {
651 r = readw(rwptr_reg);
652 rwptr_reg += 2;
653 writew(r, rwptr_reg);
654 rwptr_reg += 2;
655
656 gmac_clean_txq(netdev, port->txq + i, r);
657 }
658 writel(0, port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG);
659
660 kfree(port->txq->skb);
661 dma_free_coherent(geth->dev,
662 n_txq * sizeof(*port->txq->ring) << port->txq_order,
663 port->txq->ring, port->txq_dma_base);
664}
665
666static int gmac_setup_rxq(struct net_device *netdev)
667{
668 struct gemini_ethernet_port *port = netdev_priv(netdev);
669 struct gemini_ethernet *geth = port->geth;
670 struct nontoe_qhdr __iomem *qhdr;
671
672 qhdr = geth->base + TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id);
673 port->rxq_rwptr = &qhdr->word1;
674
675 /* Remap a slew of memory to use for the RX queue */
676 port->rxq_ring = dma_alloc_coherent(geth->dev,
677 sizeof(*port->rxq_ring) << port->rxq_order,
678 &port->rxq_dma_base, GFP_KERNEL);
679 if (!port->rxq_ring)
680 return -ENOMEM;
681 if (port->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK) {
682 dev_warn(geth->dev, "RX queue base it not aligned\n");
683 return -ENOMEM;
684 }
685
686 writel(port->rxq_dma_base | port->rxq_order, &qhdr->word0);
687 writel(0, port->rxq_rwptr);
688 return 0;
689}
690
691static struct gmac_queue_page *
692gmac_get_queue_page(struct gemini_ethernet *geth,
693 struct gemini_ethernet_port *port,
694 dma_addr_t addr)
695{
696 struct gmac_queue_page *gpage;
697 dma_addr_t mapping;
698 int i;
699
700 /* Only look for even pages */
701 mapping = addr & PAGE_MASK;
702
703 if (!geth->freeq_pages) {
704 dev_err(geth->dev, "try to get page with no page list\n");
705 return NULL;
706 }
707
708 /* Look up a ring buffer page from virtual mapping */
709 for (i = 0; i < geth->num_freeq_pages; i++) {
710 gpage = &geth->freeq_pages[i];
711 if (gpage->mapping == mapping)
712 return gpage;
713 }
714
715 return NULL;
716}
717
718static void gmac_cleanup_rxq(struct net_device *netdev)
719{
720 struct gemini_ethernet_port *port = netdev_priv(netdev);
721 struct gemini_ethernet *geth = port->geth;
722 struct gmac_rxdesc *rxd = port->rxq_ring;
723 static struct gmac_queue_page *gpage;
724 struct nontoe_qhdr __iomem *qhdr;
725 void __iomem *dma_reg;
726 void __iomem *ptr_reg;
727 dma_addr_t mapping;
728 union dma_rwptr rw;
729 unsigned int r, w;
730
731 qhdr = geth->base +
732 TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id);
733 dma_reg = &qhdr->word0;
734 ptr_reg = &qhdr->word1;
735
736 rw.bits32 = readl(ptr_reg);
737 r = rw.bits.rptr;
738 w = rw.bits.wptr;
739 writew(r, ptr_reg + 2);
740
741 writel(0, dma_reg);
742
743 /* Loop from read pointer to write pointer of the RX queue
744 * and free up all pages by the queue.
745 */
746 while (r != w) {
747 mapping = rxd[r].word2.buf_adr;
748 r++;
749 r &= ((1 << port->rxq_order) - 1);
750
751 if (!mapping)
752 continue;
753
754 /* Freeq pointers are one page off */
755 gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
756 if (!gpage) {
757 dev_err(geth->dev, "could not find page\n");
758 continue;
759 }
760 /* Release the RX queue reference to the page */
761 put_page(gpage->page);
762 }
763
764 dma_free_coherent(geth->dev, sizeof(*port->rxq_ring) << port->rxq_order,
765 port->rxq_ring, port->rxq_dma_base);
766}
767
768static struct page *geth_freeq_alloc_map_page(struct gemini_ethernet *geth,
769 int pn)
770{
771 struct gmac_rxdesc *freeq_entry;
772 struct gmac_queue_page *gpage;
773 unsigned int fpp_order;
774 unsigned int frag_len;
775 dma_addr_t mapping;
776 struct page *page;
777 int i;
778
779 /* First allocate and DMA map a single page */
780 page = alloc_page(GFP_ATOMIC);
781 if (!page)
782 return NULL;
783
784 mapping = dma_map_single(geth->dev, page_address(page),
785 PAGE_SIZE, DMA_FROM_DEVICE);
786 if (dma_mapping_error(geth->dev, mapping)) {
787 put_page(page);
788 return NULL;
789 }
790
 791 /* Then assign the page mapping (physical address) to the buffer address
 792 * in the hardware queue. PAGE_SHIFT on ARM is 12 (1 page is 4096 bytes,
 793 * 4k), and the default RX frag order is 11 (fragments are up to 2048
 794 * bytes, 2k) so fpp_order (fragments per page order) is default 1. Thus
 795 * each page normally needs two entries in the queue.
 796 */
797 frag_len = 1 << geth->freeq_frag_order; /* Usually 2048 */
798 fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
799 freeq_entry = geth->freeq_ring + (pn << fpp_order);
800 dev_dbg(geth->dev, "allocate page %d fragment length %d fragments per page %d, freeq entry %p\n",
801 pn, frag_len, (1 << fpp_order), freeq_entry);
802 for (i = (1 << fpp_order); i > 0; i--) {
803 freeq_entry->word2.buf_adr = mapping;
804 freeq_entry++;
805 mapping += frag_len;
806 }
807
808 /* If the freeq entry already has a page mapped, then unmap it. */
809 gpage = &geth->freeq_pages[pn];
810 if (gpage->page) {
811 mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
812 dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
813 /* This should be the last reference to the page so it gets
814 * released
815 */
816 put_page(gpage->page);
817 }
818
819 /* Then put our new mapping into the page table */
820 dev_dbg(geth->dev, "page %d, DMA addr: %08x, page %p\n",
821 pn, (unsigned int)mapping, page);
822 gpage->mapping = mapping;
823 gpage->page = page;
824
825 return page;
826}
827
828/**
829 * geth_fill_freeq() - Fill the freeq with empty fragments to use
830 * @geth: the ethernet adapter
831 * @refill: whether to reset the queue by filling in all freeq entries or
832 * just refill it, usually the interrupt to refill the queue happens when
833 * the queue is half empty.
834 */
835static unsigned int geth_fill_freeq(struct gemini_ethernet *geth, bool refill)
836{
837 unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
838 unsigned int count = 0;
839 unsigned int pn, epn;
840 unsigned long flags;
841 union dma_rwptr rw;
842 unsigned int m_pn;
843
844 /* Mask for page */
845 m_pn = (1 << (geth->freeq_order - fpp_order)) - 1;
846
847 spin_lock_irqsave(&geth->freeq_lock, flags);
848
849 rw.bits32 = readl(geth->base + GLOBAL_SWFQ_RWPTR_REG);
850 pn = (refill ? rw.bits.wptr : rw.bits.rptr) >> fpp_order;
851 epn = (rw.bits.rptr >> fpp_order) - 1;
852 epn &= m_pn;
853
854 /* Loop over the freeq ring buffer entries */
855 while (pn != epn) {
856 struct gmac_queue_page *gpage;
857 struct page *page;
858
859 gpage = &geth->freeq_pages[pn];
860 page = gpage->page;
861
862 dev_dbg(geth->dev, "fill entry %d page ref count %d add %d refs\n",
863 pn, page_ref_count(page), 1 << fpp_order);
864
865 if (page_ref_count(page) > 1) {
866 unsigned int fl = (pn - epn) & m_pn;
867
868 if (fl > 64 >> fpp_order)
869 break;
870
871 page = geth_freeq_alloc_map_page(geth, pn);
872 if (!page)
873 break;
874 }
875
876 /* Add one reference per fragment in the page */
877 page_ref_add(page, 1 << fpp_order);
878 count += 1 << fpp_order;
879 pn++;
880 pn &= m_pn;
881 }
882
883 writew(pn << fpp_order, geth->base + GLOBAL_SWFQ_RWPTR_REG + 2);
884
885 spin_unlock_irqrestore(&geth->freeq_lock, flags);
886
887 return count;
888}
889
890static int geth_setup_freeq(struct gemini_ethernet *geth)
891{
892 unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
893 unsigned int frag_len = 1 << geth->freeq_frag_order;
894 unsigned int len = 1 << geth->freeq_order;
895 unsigned int pages = len >> fpp_order;
896 union queue_threshold qt;
897 union dma_skb_size skbsz;
898 unsigned int filled;
899 unsigned int pn;
900
901 geth->freeq_ring = dma_alloc_coherent(geth->dev,
902 sizeof(*geth->freeq_ring) << geth->freeq_order,
903 &geth->freeq_dma_base, GFP_KERNEL);
904 if (!geth->freeq_ring)
905 return -ENOMEM;
906 if (geth->freeq_dma_base & ~DMA_Q_BASE_MASK) {
907 dev_warn(geth->dev, "queue ring base it not aligned\n");
908 goto err_freeq;
909 }
910
911 /* Allocate a mapping to page look-up index */
912 geth->freeq_pages = kzalloc(pages * sizeof(*geth->freeq_pages),
913 GFP_KERNEL);
914 if (!geth->freeq_pages)
915 goto err_freeq;
916 geth->num_freeq_pages = pages;
917
918 dev_info(geth->dev, "allocate %d pages for queue\n", pages);
919 for (pn = 0; pn < pages; pn++)
920 if (!geth_freeq_alloc_map_page(geth, pn))
921 goto err_freeq_alloc;
922
923 filled = geth_fill_freeq(geth, false);
924 if (!filled)
925 goto err_freeq_alloc;
926
927 qt.bits32 = readl(geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
928 qt.bits.swfq_empty = 32;
929 writel(qt.bits32, geth->base + GLOBAL_QUEUE_THRESHOLD_REG);
930
931 skbsz.bits.sw_skb_size = 1 << geth->freeq_frag_order;
932 writel(skbsz.bits32, geth->base + GLOBAL_DMA_SKB_SIZE_REG);
933 writel(geth->freeq_dma_base | geth->freeq_order,
934 geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
935
936 return 0;
937
938err_freeq_alloc:
939 while (pn > 0) {
940 struct gmac_queue_page *gpage;
941 dma_addr_t mapping;
942
943 --pn;
944 mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
945 dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
946 gpage = &geth->freeq_pages[pn];
947 put_page(gpage->page);
948 }
949
950 kfree(geth->freeq_pages);
951err_freeq:
952 dma_free_coherent(geth->dev,
953 sizeof(*geth->freeq_ring) << geth->freeq_order,
954 geth->freeq_ring, geth->freeq_dma_base);
955 geth->freeq_ring = NULL;
956 return -ENOMEM;
957}
958
959/**
960 * geth_cleanup_freeq() - cleanup the DMA mappings and free the queue
961 * @geth: the Gemini global ethernet state
962 */
963static void geth_cleanup_freeq(struct gemini_ethernet *geth)
964{
965 unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order;
966 unsigned int frag_len = 1 << geth->freeq_frag_order;
967 unsigned int len = 1 << geth->freeq_order;
968 unsigned int pages = len >> fpp_order;
969 unsigned int pn;
970
971 writew(readw(geth->base + GLOBAL_SWFQ_RWPTR_REG),
972 geth->base + GLOBAL_SWFQ_RWPTR_REG + 2);
973 writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
974
975 for (pn = 0; pn < pages; pn++) {
976 struct gmac_queue_page *gpage;
977 dma_addr_t mapping;
978
979 mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
980 dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
981
982 gpage = &geth->freeq_pages[pn];
983 while (page_ref_count(gpage->page) > 0)
984 put_page(gpage->page);
985 }
986
987 kfree(geth->freeq_pages);
988
989 dma_free_coherent(geth->dev,
990 sizeof(*geth->freeq_ring) << geth->freeq_order,
991 geth->freeq_ring, geth->freeq_dma_base);
992}
993
994/**
995 * geth_resize_freeq() - resize the software queue depth
996 * @port: the port requesting the change
997 *
998 * This gets called at least once during probe() so the device queue gets
999 * "resized" from the hardware defaults. Since both ports/net devices share
1000 * the same hardware queue, some synchronization between the ports is
1001 * needed.
1002 */
1003static int geth_resize_freeq(struct gemini_ethernet_port *port)
1004{
1005 struct gemini_ethernet *geth = port->geth;
1006 struct net_device *netdev = port->netdev;
1007 struct gemini_ethernet_port *other_port;
1008 struct net_device *other_netdev;
1009 unsigned int new_size = 0;
1010 unsigned int new_order;
1011 unsigned long flags;
1012 u32 en;
1013 int ret;
1014
1015 if (netdev->dev_id == 0)
1016 other_netdev = geth->port1->netdev;
1017 else
1018 other_netdev = geth->port0->netdev;
1019
1020 if (other_netdev && netif_running(other_netdev))
1021 return -EBUSY;
1022
1023 new_size = 1 << (port->rxq_order + 1);
1024 netdev_dbg(netdev, "port %d size: %d order %d\n",
1025 netdev->dev_id,
1026 new_size,
1027 port->rxq_order);
1028 if (other_netdev) {
1029 other_port = netdev_priv(other_netdev);
1030 new_size += 1 << (other_port->rxq_order + 1);
1031 netdev_dbg(other_netdev, "port %d size: %d order %d\n",
1032 other_netdev->dev_id,
1033 (1 << (other_port->rxq_order + 1)),
1034 other_port->rxq_order);
1035 }
1036
1037 new_order = min(15, ilog2(new_size - 1) + 1);
1038 dev_dbg(geth->dev, "set shared queue to size %d order %d\n",
1039 new_size, new_order);
1040 if (geth->freeq_order == new_order)
1041 return 0;
1042
1043 spin_lock_irqsave(&geth->irq_lock, flags);
1044
1045 /* Disable the software queue IRQs */
1046 en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
1047 en &= ~SWFQ_EMPTY_INT_BIT;
1048 writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
1049 spin_unlock_irqrestore(&geth->irq_lock, flags);
1050
1051 /* Drop the old queue */
1052 if (geth->freeq_ring)
1053 geth_cleanup_freeq(geth);
1054
1055 /* Allocate a new queue with the desired order */
1056 geth->freeq_order = new_order;
1057 ret = geth_setup_freeq(geth);
1058
1059 /* Restart the interrupts - NOTE if this is the first resize
1060 * after probe(), this is where the interrupts get turned on
1061 * in the first place.
1062 */
1063 spin_lock_irqsave(&geth->irq_lock, flags);
1064 en |= SWFQ_EMPTY_INT_BIT;
1065 writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
1066 spin_unlock_irqrestore(&geth->irq_lock, flags);
1067
1068 return ret;
1069}
1070
1071static void gmac_tx_irq_enable(struct net_device *netdev,
1072 unsigned int txq, int en)
1073{
1074 struct gemini_ethernet_port *port = netdev_priv(netdev);
1075 struct gemini_ethernet *geth = port->geth;
1076 u32 val, mask;
1077
1078 netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
1079
1080 mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
1081
1082 if (en)
1083 writel(mask, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
1084
1085 val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
1086 val = en ? val | mask : val & ~mask;
1087 writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
1088}
1089
/* TX completion interrupt: mask it again and let the stack resume
 * transmission on the queue.
 */
static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
{
	gmac_tx_irq_enable(netdev, txq_num, 0);
	netif_tx_wake_queue(netdev_get_tx_queue(netdev, txq_num));
}
1097
1098static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
1099 struct gmac_txq *txq, unsigned short *desc)
1100{
1101 struct gemini_ethernet_port *port = netdev_priv(netdev);
1102 struct skb_shared_info *skb_si = skb_shinfo(skb);
1103 unsigned short m = (1 << port->txq_order) - 1;
1104 short frag, last_frag = skb_si->nr_frags - 1;
1105 struct gemini_ethernet *geth = port->geth;
1106 unsigned int word1, word3, buflen;
1107 unsigned short w = *desc;
1108 struct gmac_txdesc *txd;
1109 skb_frag_t *skb_frag;
1110 dma_addr_t mapping;
1111 unsigned short mtu;
1112 void *buffer;
1113
1114 mtu = ETH_HLEN;
1115 mtu += netdev->mtu;
1116 if (skb->protocol == htons(ETH_P_8021Q))
1117 mtu += VLAN_HLEN;
1118
1119 word1 = skb->len;
1120 word3 = SOF_BIT;
1121
1122 if (word1 > mtu) {
1123 word1 |= TSS_MTU_ENABLE_BIT;
1124 word3 |= mtu;
1125 }
1126
1127 if (skb->ip_summed != CHECKSUM_NONE) {
1128 int tcp = 0;
1129
1130 if (skb->protocol == htons(ETH_P_IP)) {
1131 word1 |= TSS_IP_CHKSUM_BIT;
1132 tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
1133 } else { /* IPv6 */
1134 word1 |= TSS_IPV6_ENABLE_BIT;
1135 tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
1136 }
1137
1138 word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
1139 }
1140
1141 frag = -1;
1142 while (frag <= last_frag) {
1143 if (frag == -1) {
1144 buffer = skb->data;
1145 buflen = skb_headlen(skb);
1146 } else {
1147 skb_frag = skb_si->frags + frag;
1148 buffer = page_address(skb_frag_page(skb_frag)) +
1149 skb_frag->page_offset;
1150 buflen = skb_frag->size;
1151 }
1152
1153 if (frag == last_frag) {
1154 word3 |= EOF_BIT;
1155 txq->skb[w] = skb;
1156 }
1157
1158 mapping = dma_map_single(geth->dev, buffer, buflen,
1159 DMA_TO_DEVICE);
1160 if (dma_mapping_error(geth->dev, mapping))
1161 goto map_error;
1162
1163 txd = txq->ring + w;
1164 txd->word0.bits32 = buflen;
1165 txd->word1.bits32 = word1;
1166 txd->word2.buf_adr = mapping;
1167 txd->word3.bits32 = word3;
1168
1169 word3 &= MTU_SIZE_BIT_MASK;
1170 w++;
1171 w &= m;
1172 frag++;
1173 }
1174
1175 *desc = w;
1176 return 0;
1177
1178map_error:
1179 while (w != *desc) {
1180 w--;
1181 w &= m;
1182
1183 dma_unmap_page(geth->dev, txq->ring[w].word2.buf_adr,
1184 txq->ring[w].word0.bits.buffer_size,
1185 DMA_TO_DEVICE);
1186 }
1187 return -ENOMEM;
1188}
1189
/* ndo_start_xmit: map the skb onto the TX ring and kick the DMA */
static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct gemini_ethernet_port *port = netdev_priv(netdev);
	unsigned short m = (1 << port->txq_order) - 1;
	struct netdev_queue *ntxq;
	unsigned short r, w, d;
	void __iomem *ptr_reg;
	struct gmac_txq *txq;
	int txq_num, nfrags;
	union dma_rwptr rw;

	SKB_FRAG_ASSERT(skb);

	/* The descriptor length field cannot express 64 KiB or more */
	if (skb->len >= 0x10000)
		goto out_drop_free;

	txq_num = skb_get_queue_mapping(skb);
	ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE_PTR_REG(txq_num);
	txq = &port->txq[txq_num];
	ntxq = netdev_get_tx_queue(netdev, txq_num);
	nfrags = skb_shinfo(skb)->nr_frags;

	rw.bits32 = readl(ptr_reg);
	r = rw.bits.rptr;
	w = rw.bits.wptr;

	/* Free descriptors between the completion pointer and our write
	 * pointer (modulo ring size).
	 */
	d = txq->cptr - w - 1;
	d &= m;

	/* Need room for head + fragments; reclaim completed slots first */
	if (d < nfrags + 2) {
		gmac_clean_txq(netdev, txq, r);
		d = txq->cptr - w - 1;
		d &= m;

		if (d < nfrags + 2) {
			netif_tx_stop_queue(ntxq);

			/* Arm an end-of-frame interrupt a little ahead so
			 * the queue gets woken once space frees up.
			 */
			d = txq->cptr + nfrags + 16;
			d &= m;
			txq->ring[d].word3.bits.eofie = 1;
			gmac_tx_irq_enable(netdev, txq_num, 1);

			u64_stats_update_begin(&port->tx_stats_syncp);
			netdev->stats.tx_fifo_errors++;
			u64_stats_update_end(&port->tx_stats_syncp);
			return NETDEV_TX_BUSY;
		}
	}

	/* If mapping fails (e.g. too scattered), linearize and retry once */
	if (gmac_map_tx_bufs(netdev, skb, txq, &w)) {
		if (skb_linearize(skb))
			goto out_drop;

		u64_stats_update_begin(&port->tx_stats_syncp);
		port->tx_frags_linearized++;
		u64_stats_update_end(&port->tx_stats_syncp);

		if (gmac_map_tx_bufs(netdev, skb, txq, &w))
			goto out_drop_free;
	}

	/* Publish the new write pointer: this starts the transmission */
	writew(w, ptr_reg + 2);

	gmac_clean_txq(netdev, txq, r);
	return NETDEV_TX_OK;

out_drop_free:
	dev_kfree_skb(skb);
out_drop:
	u64_stats_update_begin(&port->tx_stats_syncp);
	port->stats.tx_dropped++;
	u64_stats_update_end(&port->tx_stats_syncp);
	return NETDEV_TX_OK;
}
1264
/* ndo_tx_timeout: no recovery strategy, just dump state for debugging */
static void gmac_tx_timeout(struct net_device *netdev)
{
	netdev_err(netdev, "Tx timeout\n");
	gmac_dump_dma_state(netdev);
}
1270
1271static void gmac_enable_irq(struct net_device *netdev, int enable)
1272{
1273 struct gemini_ethernet_port *port = netdev_priv(netdev);
1274 struct gemini_ethernet *geth = port->geth;
1275 unsigned long flags;
1276 u32 val, mask;
1277
1278 netdev_info(netdev, "%s device %d %s\n", __func__,
1279 netdev->dev_id, enable ? "enable" : "disable");
1280 spin_lock_irqsave(&geth->irq_lock, flags);
1281
1282 mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2);
1283 val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
1284 val = enable ? (val | mask) : (val & ~mask);
1285 writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
1286
1287 mask = DEFAULT_Q0_INT_BIT << netdev->dev_id;
1288 val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
1289 val = enable ? (val | mask) : (val & ~mask);
1290 writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
1291
1292 mask = GMAC0_IRQ4_8 << (netdev->dev_id * 8);
1293 val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
1294 val = enable ? (val | mask) : (val & ~mask);
1295 writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
1296
1297 spin_unlock_irqrestore(&geth->irq_lock, flags);
1298}
1299
1300static void gmac_enable_rx_irq(struct net_device *netdev, int enable)
1301{
1302 struct gemini_ethernet_port *port = netdev_priv(netdev);
1303 struct gemini_ethernet *geth = port->geth;
1304 unsigned long flags;
1305 u32 val, mask;
1306
1307 netdev_dbg(netdev, "%s device %d %s\n", __func__, netdev->dev_id,
1308 enable ? "enable" : "disable");
1309 spin_lock_irqsave(&geth->irq_lock, flags);
1310 mask = DEFAULT_Q0_INT_BIT << netdev->dev_id;
1311
1312 val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
1313 val = enable ? (val | mask) : (val & ~mask);
1314 writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
1315
1316 spin_unlock_irqrestore(&geth->irq_lock, flags);
1317}
1318
1319static struct sk_buff *gmac_skb_if_good_frame(struct gemini_ethernet_port *port,
1320 union gmac_rxdesc_0 word0,
1321 unsigned int frame_len)
1322{
1323 unsigned int rx_csum = word0.bits.chksum_status;
1324 unsigned int rx_status = word0.bits.status;
1325 struct sk_buff *skb = NULL;
1326
1327 port->rx_stats[rx_status]++;
1328 port->rx_csum_stats[rx_csum]++;
1329
1330 if (word0.bits.derr || word0.bits.perr ||
1331 rx_status || frame_len < ETH_ZLEN ||
1332 rx_csum >= RX_CHKSUM_IP_ERR_UNKNOWN) {
1333 port->stats.rx_errors++;
1334
1335 if (frame_len < ETH_ZLEN || RX_ERROR_LENGTH(rx_status))
1336 port->stats.rx_length_errors++;
1337 if (RX_ERROR_OVER(rx_status))
1338 port->stats.rx_over_errors++;
1339 if (RX_ERROR_CRC(rx_status))
1340 port->stats.rx_crc_errors++;
1341 if (RX_ERROR_FRAME(rx_status))
1342 port->stats.rx_frame_errors++;
1343 return NULL;
1344 }
1345
1346 skb = napi_get_frags(&port->napi);
1347 if (!skb)
1348 goto update_exit;
1349
1350 if (rx_csum == RX_CHKSUM_IP_UDP_TCP_OK)
1351 skb->ip_summed = CHECKSUM_UNNECESSARY;
1352
1353update_exit:
1354 port->stats.rx_bytes += frame_len;
1355 port->stats.rx_packets++;
1356 return skb;
1357}
1358
/* Harvest the RX descriptor ring, assembling multi-fragment frames into
 * NAPI frags skbs. Decrements @budget once per completed frame and
 * returns the LEFTOVER budget (callers must convert to a received
 * count themselves).
 */
static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
{
	struct gemini_ethernet_port *port = netdev_priv(netdev);
	unsigned short m = (1 << port->rxq_order) - 1;
	struct gemini_ethernet *geth = port->geth;
	void __iomem *ptr_reg = port->rxq_rwptr;
	unsigned int frame_len, frag_len;
	struct gmac_rxdesc *rx = NULL;
	struct gmac_queue_page *gpage;
	/* NOTE(review): this function-static pointer carries the
	 * partially assembled frame across NAPI polls, but it is shared
	 * by BOTH ports' NAPI contexts - looks racy when the two ports
	 * receive concurrently; confirm and consider moving this into
	 * the per-port state.
	 */
	static struct sk_buff *skb;
	union gmac_rxdesc_0 word0;
	union gmac_rxdesc_1 word1;
	union gmac_rxdesc_3 word3;
	struct page *page = NULL;
	unsigned int page_offs;
	unsigned short r, w;
	union dma_rwptr rw;
	dma_addr_t mapping;
	int frag_nr = 0;

	rw.bits32 = readl(ptr_reg);
	/* Reset interrupt as all packages until here are taken into account */
	writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
	       geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
	r = rw.bits.rptr;
	w = rw.bits.wptr;

	/* Walk descriptors from read to write pointer, one fragment each */
	while (budget && w != r) {
		rx = port->rxq_ring + r;
		word0 = rx->word0;
		word1 = rx->word1;
		mapping = rx->word2.buf_adr;
		word3 = rx->word3;

		r++;
		r &= m;

		frag_len = word0.bits.buffer_size;
		frame_len = word1.bits.byte_count;
		page_offs = mapping & ~PAGE_MASK;

		if (!mapping) {
			netdev_err(netdev,
				   "rxq[%u]: HW BUG: zero DMA desc\n", r);
			goto err_drop;
		}

		/* Freeq pointers are one page off */
		gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
		if (!gpage) {
			dev_err(geth->dev, "could not find mapping\n");
			continue;
		}
		page = gpage->page;

		if (word3.bits32 & SOF_BIT) {
			/* Start of a new frame: drop any unfinished one */
			if (skb) {
				napi_free_frags(&port->napi);
				port->stats.rx_dropped++;
			}

			skb = gmac_skb_if_good_frame(port, word0, frame_len);
			if (!skb)
				goto err_drop;

			/* Skip the NET_IP_ALIGN pad bytes the DMA
			 * engine inserted (rd_insert_bytes).
			 */
			page_offs += NET_IP_ALIGN;
			frag_len -= NET_IP_ALIGN;
			frag_nr = 0;

		} else if (!skb) {
			/* Continuation fragment but no frame in
			 * progress: just return the page.
			 */
			put_page(page);
			continue;
		}

		/* The last fragment's length is whatever remains of the
		 * total frame length.
		 */
		if (word3.bits32 & EOF_BIT)
			frag_len = frame_len - skb->len;

		/* append page frag to skb */
		if (frag_nr == MAX_SKB_FRAGS)
			goto err_drop;

		if (frag_len == 0)
			netdev_err(netdev, "Received fragment with len = 0\n");

		skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len);
		skb->len += frag_len;
		skb->data_len += frag_len;
		skb->truesize += frag_len;
		frag_nr++;

		if (word3.bits32 & EOF_BIT) {
			/* Frame complete: hand it to GRO */
			napi_gro_frags(&port->napi);
			skb = NULL;
			--budget;
		}
		continue;

err_drop:
		if (skb) {
			napi_free_frags(&port->napi);
			skb = NULL;
		}

		if (mapping)
			put_page(page);

		port->stats.rx_dropped++;
	}

	/* Publish the new read pointer so the hardware can reuse slots */
	writew(r, ptr_reg);
	return budget;
}
1471
1472static int gmac_napi_poll(struct napi_struct *napi, int budget)
1473{
1474 struct gemini_ethernet_port *port = netdev_priv(napi->dev);
1475 struct gemini_ethernet *geth = port->geth;
1476 unsigned int freeq_threshold;
1477 unsigned int received;
1478
1479 freeq_threshold = 1 << (geth->freeq_order - 1);
1480 u64_stats_update_begin(&port->rx_stats_syncp);
1481
1482 received = gmac_rx(napi->dev, budget);
1483 if (received < budget) {
1484 napi_gro_flush(napi, false);
1485 napi_complete_done(napi, received);
1486 gmac_enable_rx_irq(napi->dev, 1);
1487 ++port->rx_napi_exits;
1488 }
1489
1490 port->freeq_refill += (budget - received);
1491 if (port->freeq_refill > freeq_threshold) {
1492 port->freeq_refill -= freeq_threshold;
1493 geth_fill_freeq(geth, true);
1494 }
1495
1496 u64_stats_update_end(&port->rx_stats_syncp);
1497 return received;
1498}
1499
/* Dump the interrupt, RX/TX DMA and free-queue register state to the
 * log; used from the TX timeout and fatal-error paths for post-mortem
 * debugging.
 */
static void gmac_dump_dma_state(struct net_device *netdev)
{
	struct gemini_ethernet_port *port = netdev_priv(netdev);
	struct gemini_ethernet *geth = port->geth;
	void __iomem *ptr_reg;
	u32 reg[5];

	/* Interrupt status */
	reg[0] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
	reg[1] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
	reg[2] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_2_REG);
	reg[3] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_3_REG);
	reg[4] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
	netdev_err(netdev, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		   reg[0], reg[1], reg[2], reg[3], reg[4]);

	/* Interrupt enable */
	reg[0] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
	reg[1] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
	reg[2] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
	reg[3] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG);
	reg[4] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
	netdev_err(netdev, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
		   reg[0], reg[1], reg[2], reg[3], reg[4]);

	/* RX DMA status */
	reg[0] = readl(port->dma_base + GMAC_DMA_RX_FIRST_DESC_REG);
	reg[1] = readl(port->dma_base + GMAC_DMA_RX_CURR_DESC_REG);
	reg[2] = GET_RPTR(port->rxq_rwptr);
	reg[3] = GET_WPTR(port->rxq_rwptr);
	netdev_err(netdev, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
		   reg[0], reg[1], reg[2], reg[3]);

	reg[0] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD0_REG);
	reg[1] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD1_REG);
	reg[2] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD2_REG);
	reg[3] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD3_REG);
	netdev_err(netdev, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
		   reg[0], reg[1], reg[2], reg[3]);

	/* TX DMA status */
	ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG;

	reg[0] = readl(port->dma_base + GMAC_DMA_TX_FIRST_DESC_REG);
	reg[1] = readl(port->dma_base + GMAC_DMA_TX_CURR_DESC_REG);
	reg[2] = GET_RPTR(ptr_reg);
	reg[3] = GET_WPTR(ptr_reg);
	netdev_err(netdev, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n",
		   reg[0], reg[1], reg[2], reg[3]);

	reg[0] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD0_REG);
	reg[1] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD1_REG);
	reg[2] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD2_REG);
	reg[3] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD3_REG);
	netdev_err(netdev, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n",
		   reg[0], reg[1], reg[2], reg[3]);

	/* FREE queues status */
	ptr_reg = geth->base + GLOBAL_SWFQ_RWPTR_REG;

	reg[0] = GET_RPTR(ptr_reg);
	reg[1] = GET_WPTR(ptr_reg);

	ptr_reg = geth->base + GLOBAL_HWFQ_RWPTR_REG;

	reg[2] = GET_RPTR(ptr_reg);
	reg[3] = GET_WPTR(ptr_reg);
	netdev_err(netdev, "FQ SW ptr: %u %u, HW ptr: %u %u\n",
		   reg[0], reg[1], reg[2], reg[3]);
}
1570
/* Fold the hardware MIB counters into the software counters and ack the
 * MIB interrupt. Called from both the MIB interrupt and process context
 * (stats queries), hence the irqsave lock.
 */
static void gmac_update_hw_stats(struct net_device *netdev)
{
	struct gemini_ethernet_port *port = netdev_priv(netdev);
	unsigned int rx_discards, rx_mcast, rx_bcast;
	struct gemini_ethernet *geth = port->geth;
	unsigned long flags;

	spin_lock_irqsave(&geth->irq_lock, flags);
	u64_stats_update_begin(&port->ir_stats_syncp);

	rx_discards = readl(port->gmac_base + GMAC_IN_DISCARDS);
	port->hw_stats[0] += rx_discards;
	port->hw_stats[1] += readl(port->gmac_base + GMAC_IN_ERRORS);
	rx_mcast = readl(port->gmac_base + GMAC_IN_MCAST);
	port->hw_stats[2] += rx_mcast;
	rx_bcast = readl(port->gmac_base + GMAC_IN_BCAST);
	port->hw_stats[3] += rx_bcast;
	port->hw_stats[4] += readl(port->gmac_base + GMAC_IN_MAC1);
	port->hw_stats[5] += readl(port->gmac_base + GMAC_IN_MAC2);

	port->stats.rx_missed_errors += rx_discards;
	/* Broadcast frames are folded into the multicast counter too */
	port->stats.multicast += rx_mcast;
	port->stats.multicast += rx_bcast;

	/* Ack this port's MIB interrupt */
	writel(GMAC0_MIB_INT_BIT << (netdev->dev_id * 8),
	       geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);

	u64_stats_update_end(&port->ir_stats_syncp);
	spin_unlock_irqrestore(&geth->irq_lock, flags);
}
1601
1602/**
1603 * gmac_get_intr_flags() - get interrupt status flags for a port from
1604 * @netdev: the net device for the port to get flags from
1605 * @i: the interrupt status register 0..4
1606 */
1607static u32 gmac_get_intr_flags(struct net_device *netdev, int i)
1608{
1609 struct gemini_ethernet_port *port = netdev_priv(netdev);
1610 struct gemini_ethernet *geth = port->geth;
1611 void __iomem *irqif_reg, *irqen_reg;
1612 unsigned int offs, val;
1613
1614 /* Calculate the offset using the stride of the status registers */
1615 offs = i * (GLOBAL_INTERRUPT_STATUS_1_REG -
1616 GLOBAL_INTERRUPT_STATUS_0_REG);
1617
1618 irqif_reg = geth->base + GLOBAL_INTERRUPT_STATUS_0_REG + offs;
1619 irqen_reg = geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG + offs;
1620
1621 val = readl(irqif_reg) & readl(irqen_reg);
1622 return val;
1623}
1624
1625static enum hrtimer_restart gmac_coalesce_delay_expired(struct hrtimer *timer)
1626{
1627 struct gemini_ethernet_port *port =
1628 container_of(timer, struct gemini_ethernet_port,
1629 rx_coalesce_timer);
1630
1631 napi_schedule(&port->napi);
1632 return HRTIMER_NORESTART;
1633}
1634
/* Shared interrupt handler for one port: dispatches fatal errors, TX
 * completion, RX (direct or via the coalescing hrtimer), MIB counter
 * overflow and RX overrun events.
 */
static irqreturn_t gmac_irq(int irq, void *data)
{
	struct gemini_ethernet_port *port;
	struct net_device *netdev = data;
	struct gemini_ethernet *geth;
	u32 val, orr = 0;

	port = netdev_priv(netdev);
	geth = port->geth;

	/* Register 0: fatal errors and TX completion */
	val = gmac_get_intr_flags(netdev, 0);
	orr |= val;

	if (val & (GMAC0_IRQ0_2 << (netdev->dev_id * 2))) {
		/* Oh, crap */
		netdev_err(netdev, "hw failure/sw bug\n");
		gmac_dump_dma_state(netdev);

		/* don't know how to recover, just reduce losses */
		gmac_enable_irq(netdev, 0);
		return IRQ_HANDLED;
	}

	if (val & (GMAC0_IRQ0_TXQ0_INTS << (netdev->dev_id * 6)))
		gmac_tx_irq(netdev, 0);

	/* Register 1: RX on the default queue */
	val = gmac_get_intr_flags(netdev, 1);
	orr |= val;

	if (val & (DEFAULT_Q0_INT_BIT << netdev->dev_id)) {
		gmac_enable_rx_irq(netdev, 0);

		if (!port->rx_coalesce_nsecs) {
			napi_schedule(&port->napi);
		} else {
			/* Defer the poll to batch up more packets */
			ktime_t ktime;

			ktime = ktime_set(0, port->rx_coalesce_nsecs);
			hrtimer_start(&port->rx_coalesce_timer, ktime,
				      HRTIMER_MODE_REL);
		}
	}

	/* Register 4: MIB counters and RX overrun */
	val = gmac_get_intr_flags(netdev, 4);
	orr |= val;

	if (val & (GMAC0_MIB_INT_BIT << (netdev->dev_id * 8)))
		gmac_update_hw_stats(netdev);

	if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
		/* Ack the overrun interrupt and count the loss */
		writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
		       geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);

		spin_lock(&geth->irq_lock);
		u64_stats_update_begin(&port->ir_stats_syncp);
		++port->stats.rx_fifo_errors;
		u64_stats_update_end(&port->ir_stats_syncp);
		spin_unlock(&geth->irq_lock);
	}

	return orr ? IRQ_HANDLED : IRQ_NONE;
}
1697
1698static void gmac_start_dma(struct gemini_ethernet_port *port)
1699{
1700 void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG;
1701 union gmac_dma_ctrl dma_ctrl;
1702
1703 dma_ctrl.bits32 = readl(dma_ctrl_reg);
1704 dma_ctrl.bits.rd_enable = 1;
1705 dma_ctrl.bits.td_enable = 1;
1706 dma_ctrl.bits.loopback = 0;
1707 dma_ctrl.bits.drop_small_ack = 0;
1708 dma_ctrl.bits.rd_insert_bytes = NET_IP_ALIGN;
1709 dma_ctrl.bits.rd_prot = HPROT_DATA_CACHE | HPROT_PRIVILIGED;
1710 dma_ctrl.bits.rd_burst_size = HBURST_INCR8;
1711 dma_ctrl.bits.rd_bus = HSIZE_8;
1712 dma_ctrl.bits.td_prot = HPROT_DATA_CACHE;
1713 dma_ctrl.bits.td_burst_size = HBURST_INCR8;
1714 dma_ctrl.bits.td_bus = HSIZE_8;
1715
1716 writel(dma_ctrl.bits32, dma_ctrl_reg);
1717}
1718
1719static void gmac_stop_dma(struct gemini_ethernet_port *port)
1720{
1721 void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG;
1722 union gmac_dma_ctrl dma_ctrl;
1723
1724 dma_ctrl.bits32 = readl(dma_ctrl_reg);
1725 dma_ctrl.bits.rd_enable = 0;
1726 dma_ctrl.bits.td_enable = 0;
1727 writel(dma_ctrl.bits32, dma_ctrl_reg);
1728}
1729
1730static int gmac_open(struct net_device *netdev)
1731{
1732 struct gemini_ethernet_port *port = netdev_priv(netdev);
1733 int err;
1734
1735 if (!netdev->phydev) {
1736 err = gmac_setup_phy(netdev);
1737 if (err) {
1738 netif_err(port, ifup, netdev,
1739 "PHY init failed: %d\n", err);
1740 return err;
1741 }
1742 }
1743
1744 err = request_irq(netdev->irq, gmac_irq,
1745 IRQF_SHARED, netdev->name, netdev);
1746 if (err) {
1747 netdev_err(netdev, "no IRQ\n");
1748 return err;
1749 }
1750
1751 netif_carrier_off(netdev);
1752 phy_start(netdev->phydev);
1753
1754 err = geth_resize_freeq(port);
1755 if (err) {
1756 netdev_err(netdev, "could not resize freeq\n");
1757 goto err_stop_phy;
1758 }
1759
1760 err = gmac_setup_rxq(netdev);
1761 if (err) {
1762 netdev_err(netdev, "could not setup RXQ\n");
1763 goto err_stop_phy;
1764 }
1765
1766 err = gmac_setup_txqs(netdev);
1767 if (err) {
1768 netdev_err(netdev, "could not setup TXQs\n");
1769 gmac_cleanup_rxq(netdev);
1770 goto err_stop_phy;
1771 }
1772
1773 napi_enable(&port->napi);
1774
1775 gmac_start_dma(port);
1776 gmac_enable_irq(netdev, 1);
1777 gmac_enable_tx_rx(netdev);
1778 netif_tx_start_all_queues(netdev);
1779
1780 hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
1781 HRTIMER_MODE_REL);
1782 port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
1783
1784 netdev_info(netdev, "opened\n");
1785
1786 return 0;
1787
1788err_stop_phy:
1789 phy_stop(netdev->phydev);
1790 free_irq(netdev->irq, netdev);
1791 return err;
1792}
1793
/* ndo_stop: tear down in reverse of gmac_open() - stop the coalescing
 * timer and stack queues, quiesce the MAC and DMA, disable NAPI and
 * interrupts, free the queues, then stop the PHY and release the IRQ.
 */
static int gmac_stop(struct net_device *netdev)
{
	struct gemini_ethernet_port *port = netdev_priv(netdev);

	hrtimer_cancel(&port->rx_coalesce_timer);
	netif_tx_stop_all_queues(netdev);
	gmac_disable_tx_rx(netdev);
	gmac_stop_dma(port);
	napi_disable(&port->napi);

	gmac_enable_irq(netdev, 0);
	gmac_cleanup_rxq(netdev);
	gmac_cleanup_txqs(netdev);

	phy_stop(netdev->phydev);
	free_irq(netdev->irq, netdev);

	/* Fold the final hardware counters into the software stats */
	gmac_update_hw_stats(netdev);
	return 0;
}
1814
1815static void gmac_set_rx_mode(struct net_device *netdev)
1816{
1817 struct gemini_ethernet_port *port = netdev_priv(netdev);
1818 union gmac_rx_fltr filter = { .bits = {
1819 .broadcast = 1,
1820 .multicast = 1,
1821 .unicast = 1,
1822 } };
1823 struct netdev_hw_addr *ha;
1824 unsigned int bit_nr;
1825 u32 mc_filter[2];
1826
1827 mc_filter[1] = 0;
1828 mc_filter[0] = 0;
1829
1830 if (netdev->flags & IFF_PROMISC) {
1831 filter.bits.error = 1;
1832 filter.bits.promiscuous = 1;
1833 mc_filter[1] = ~0;
1834 mc_filter[0] = ~0;
1835 } else if (netdev->flags & IFF_ALLMULTI) {
1836 mc_filter[1] = ~0;
1837 mc_filter[0] = ~0;
1838 } else {
1839 netdev_for_each_mc_addr(ha, netdev) {
1840 bit_nr = ~crc32_le(~0, ha->addr, ETH_ALEN) & 0x3f;
1841 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f);
1842 }
1843 }
1844
1845 writel(mc_filter[0], port->gmac_base + GMAC_MCAST_FIL0);
1846 writel(mc_filter[1], port->gmac_base + GMAC_MCAST_FIL1);
1847 writel(filter.bits32, port->gmac_base + GMAC_RX_FLTR);
1848}
1849
1850static void gmac_write_mac_address(struct net_device *netdev)
1851{
1852 struct gemini_ethernet_port *port = netdev_priv(netdev);
1853 __le32 addr[3];
1854
1855 memset(addr, 0, sizeof(addr));
1856 memcpy(addr, netdev->dev_addr, ETH_ALEN);
1857
1858 writel(le32_to_cpu(addr[0]), port->gmac_base + GMAC_STA_ADD0);
1859 writel(le32_to_cpu(addr[1]), port->gmac_base + GMAC_STA_ADD1);
1860 writel(le32_to_cpu(addr[2]), port->gmac_base + GMAC_STA_ADD2);
1861}
1862
1863static int gmac_set_mac_address(struct net_device *netdev, void *addr)
1864{
1865 struct sockaddr *sa = addr;
1866
1867 memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
1868 gmac_write_mac_address(netdev);
1869
1870 return 0;
1871}
1872
static void gmac_clear_hw_stats(struct net_device *netdev)
{
	struct gemini_ethernet_port *port = netdev_priv(netdev);

	/* The return values are deliberately discarded: the MIB counters
	 * appear to be clear-on-read, so reading them here resets the
	 * hardware counts.
	 * NOTE(review): clear-on-read is inferred from this usage and
	 * from gmac_update_hw_stats() accumulating the read values -
	 * confirm against the datasheet.
	 */
	readl(port->gmac_base + GMAC_IN_DISCARDS);
	readl(port->gmac_base + GMAC_IN_ERRORS);
	readl(port->gmac_base + GMAC_IN_MCAST);
	readl(port->gmac_base + GMAC_IN_BCAST);
	readl(port->gmac_base + GMAC_IN_MAC1);
	readl(port->gmac_base + GMAC_IN_MAC2);
}
1884
/* ndo_get_stats64: snapshot the software counters, each group under its
 * own u64_stats seqcount so a concurrent writer forces a consistent
 * re-read.
 */
static void gmac_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *stats)
{
	struct gemini_ethernet_port *port = netdev_priv(netdev);
	unsigned int start;

	/* Pull the latest hardware MIB counters in first */
	gmac_update_hw_stats(netdev);

	/* Racing with RX NAPI */
	do {
		start = u64_stats_fetch_begin(&port->rx_stats_syncp);

		stats->rx_packets = port->stats.rx_packets;
		stats->rx_bytes = port->stats.rx_bytes;
		stats->rx_errors = port->stats.rx_errors;
		stats->rx_dropped = port->stats.rx_dropped;

		stats->rx_length_errors = port->stats.rx_length_errors;
		stats->rx_over_errors = port->stats.rx_over_errors;
		stats->rx_crc_errors = port->stats.rx_crc_errors;
		stats->rx_frame_errors = port->stats.rx_frame_errors;

	} while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));

	/* Racing with MIB and TX completion interrupts */
	do {
		start = u64_stats_fetch_begin(&port->ir_stats_syncp);

		stats->tx_errors = port->stats.tx_errors;
		stats->tx_packets = port->stats.tx_packets;
		stats->tx_bytes = port->stats.tx_bytes;

		stats->multicast = port->stats.multicast;
		stats->rx_missed_errors = port->stats.rx_missed_errors;
		stats->rx_fifo_errors = port->stats.rx_fifo_errors;

	} while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));

	/* Racing with hard_start_xmit */
	do {
		start = u64_stats_fetch_begin(&port->tx_stats_syncp);

		stats->tx_dropped = port->stats.tx_dropped;

	} while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));

	/* Frames the hardware discarded count as dropped too */
	stats->rx_dropped += stats->rx_missed_errors;
}
1933
1934static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
1935{
1936 int max_len = gmac_pick_rx_max_len(new_mtu);
1937
1938 if (max_len < 0)
1939 return -EINVAL;
1940
1941 gmac_disable_tx_rx(netdev);
1942
1943 netdev->mtu = new_mtu;
1944 gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
1945 CONFIG0_MAXLEN_MASK);
1946
1947 netdev_update_features(netdev);
1948
1949 gmac_enable_tx_rx(netdev);
1950
1951 return 0;
1952}
1953
1954static netdev_features_t gmac_fix_features(struct net_device *netdev,
1955 netdev_features_t features)
1956{
1957 if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
1958 features &= ~GMAC_OFFLOAD_FEATURES;
1959
1960 return features;
1961}
1962
1963static int gmac_set_features(struct net_device *netdev,
1964 netdev_features_t features)
1965{
1966 struct gemini_ethernet_port *port = netdev_priv(netdev);
1967 int enable = features & NETIF_F_RXCSUM;
1968 unsigned long flags;
1969 u32 reg;
1970
1971 spin_lock_irqsave(&port->config_lock, flags);
1972
1973 reg = readl(port->gmac_base + GMAC_CONFIG0);
1974 reg = enable ? reg | CONFIG0_RX_CHKSUM : reg & ~CONFIG0_RX_CHKSUM;
1975 writel(reg, port->gmac_base + GMAC_CONFIG0);
1976
1977 spin_unlock_irqrestore(&port->config_lock, flags);
1978 return 0;
1979}
1980
1981static int gmac_get_sset_count(struct net_device *netdev, int sset)
1982{
1983 return sset == ETH_SS_STATS ? GMAC_STATS_NUM : 0;
1984}
1985
1986static void gmac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1987{
1988 if (stringset != ETH_SS_STATS)
1989 return;
1990
1991 memcpy(data, gmac_stats_strings, sizeof(gmac_stats_strings));
1992}
1993
1994static void gmac_get_ethtool_stats(struct net_device *netdev,
1995 struct ethtool_stats *estats, u64 *values)
1996{
1997 struct gemini_ethernet_port *port = netdev_priv(netdev);
1998 unsigned int start;
1999 u64 *p;
2000 int i;
2001
2002 gmac_update_hw_stats(netdev);
2003
2004 /* Racing with MIB interrupt */
2005 do {
2006 p = values;
2007 start = u64_stats_fetch_begin(&port->ir_stats_syncp);
2008
2009 for (i = 0; i < RX_STATS_NUM; i++)
2010 *p++ = port->hw_stats[i];
2011
2012 } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
2013 values = p;
2014
2015 /* Racing with RX NAPI */
2016 do {
2017 p = values;
2018 start = u64_stats_fetch_begin(&port->rx_stats_syncp);
2019
2020 for (i = 0; i < RX_STATUS_NUM; i++)
2021 *p++ = port->rx_stats[i];
2022 for (i = 0; i < RX_CHKSUM_NUM; i++)
2023 *p++ = port->rx_csum_stats[i];
2024 *p++ = port->rx_napi_exits;
2025
2026 } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
2027 values = p;
2028
2029 /* Racing with TX start_xmit */
2030 do {
2031 p = values;
2032 start = u64_stats_fetch_begin(&port->tx_stats_syncp);
2033
2034 for (i = 0; i < TX_MAX_FRAGS; i++) {
2035 *values++ = port->tx_frag_stats[i];
2036 port->tx_frag_stats[i] = 0;
2037 }
2038 *values++ = port->tx_frags_linearized;
2039 *values++ = port->tx_hw_csummed;
2040
2041 } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
2042}
2043
2044static int gmac_get_ksettings(struct net_device *netdev,
2045 struct ethtool_link_ksettings *cmd)
2046{
2047 if (!netdev->phydev)
2048 return -ENXIO;
2049 phy_ethtool_ksettings_get(netdev->phydev, cmd);
2050
2051 return 0;
2052}
2053
2054static int gmac_set_ksettings(struct net_device *netdev,
2055 const struct ethtool_link_ksettings *cmd)
2056{
2057 if (!netdev->phydev)
2058 return -ENXIO;
2059 return phy_ethtool_ksettings_set(netdev->phydev, cmd);
2060}
2061
2062static int gmac_nway_reset(struct net_device *netdev)
2063{
2064 if (!netdev->phydev)
2065 return -ENXIO;
2066 return phy_start_aneg(netdev->phydev);
2067}
2068
2069static void gmac_get_pauseparam(struct net_device *netdev,
2070 struct ethtool_pauseparam *pparam)
2071{
2072 struct gemini_ethernet_port *port = netdev_priv(netdev);
2073 union gmac_config0 config0;
2074
2075 config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
2076
2077 pparam->rx_pause = config0.bits.rx_fc_en;
2078 pparam->tx_pause = config0.bits.tx_fc_en;
2079 pparam->autoneg = true;
2080}
2081
2082static void gmac_get_ringparam(struct net_device *netdev,
2083 struct ethtool_ringparam *rp)
2084{
2085 struct gemini_ethernet_port *port = netdev_priv(netdev);
2086 union gmac_config0 config0;
2087
2088 config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
2089
2090 rp->rx_max_pending = 1 << 15;
2091 rp->rx_mini_max_pending = 0;
2092 rp->rx_jumbo_max_pending = 0;
2093 rp->tx_max_pending = 1 << 15;
2094
2095 rp->rx_pending = 1 << port->rxq_order;
2096 rp->rx_mini_pending = 0;
2097 rp->rx_jumbo_pending = 0;
2098 rp->tx_pending = 1 << port->txq_order;
2099}
2100
2101static int gmac_set_ringparam(struct net_device *netdev,
2102 struct ethtool_ringparam *rp)
2103{
2104 struct gemini_ethernet_port *port = netdev_priv(netdev);
2105 int err = 0;
2106
2107 if (netif_running(netdev))
2108 return -EBUSY;
2109
2110 if (rp->rx_pending) {
2111 port->rxq_order = min(15, ilog2(rp->rx_pending - 1) + 1);
2112 err = geth_resize_freeq(port);
2113 }
2114 if (rp->tx_pending) {
2115 port->txq_order = min(15, ilog2(rp->tx_pending - 1) + 1);
2116 port->irq_every_tx_packets = 1 << (port->txq_order - 2);
2117 }
2118
2119 return err;
2120}
2121
2122static int gmac_get_coalesce(struct net_device *netdev,
2123 struct ethtool_coalesce *ecmd)
2124{
2125 struct gemini_ethernet_port *port = netdev_priv(netdev);
2126
2127 ecmd->rx_max_coalesced_frames = 1;
2128 ecmd->tx_max_coalesced_frames = port->irq_every_tx_packets;
2129 ecmd->rx_coalesce_usecs = port->rx_coalesce_nsecs / 1000;
2130
2131 return 0;
2132}
2133
2134static int gmac_set_coalesce(struct net_device *netdev,
2135 struct ethtool_coalesce *ecmd)
2136{
2137 struct gemini_ethernet_port *port = netdev_priv(netdev);
2138
2139 if (ecmd->tx_max_coalesced_frames < 1)
2140 return -EINVAL;
2141 if (ecmd->tx_max_coalesced_frames >= 1 << port->txq_order)
2142 return -EINVAL;
2143
2144 port->irq_every_tx_packets = ecmd->tx_max_coalesced_frames;
2145 port->rx_coalesce_nsecs = ecmd->rx_coalesce_usecs * 1000;
2146
2147 return 0;
2148}
2149
2150static u32 gmac_get_msglevel(struct net_device *netdev)
2151{
2152 struct gemini_ethernet_port *port = netdev_priv(netdev);
2153
2154 return port->msg_enable;
2155}
2156
2157static void gmac_set_msglevel(struct net_device *netdev, u32 level)
2158{
2159 struct gemini_ethernet_port *port = netdev_priv(netdev);
2160
2161 port->msg_enable = level;
2162}
2163
2164static void gmac_get_drvinfo(struct net_device *netdev,
2165 struct ethtool_drvinfo *info)
2166{
2167 strcpy(info->driver, DRV_NAME);
2168 strcpy(info->version, DRV_VERSION);
2169 strcpy(info->bus_info, netdev->dev_id ? "1" : "0");
2170}
2171
/* net_device callbacks shared by both SL351x ethernet ports */
static const struct net_device_ops gmac_351x_ops = {
	.ndo_init		= gmac_init,
	.ndo_uninit		= gmac_uninit,
	.ndo_open		= gmac_open,
	.ndo_stop		= gmac_stop,
	.ndo_start_xmit		= gmac_start_xmit,
	.ndo_tx_timeout		= gmac_tx_timeout,
	.ndo_set_rx_mode	= gmac_set_rx_mode,
	.ndo_set_mac_address	= gmac_set_mac_address,
	.ndo_get_stats64	= gmac_get_stats64,
	.ndo_change_mtu		= gmac_change_mtu,
	.ndo_fix_features	= gmac_fix_features,
	.ndo_set_features	= gmac_set_features,
};
2186
/* ethtool callbacks shared by both SL351x ethernet ports */
static const struct ethtool_ops gmac_351x_ethtool_ops = {
	.get_sset_count	= gmac_get_sset_count,
	.get_strings	= gmac_get_strings,
	.get_ethtool_stats = gmac_get_ethtool_stats,
	.get_link	= ethtool_op_get_link,
	.get_link_ksettings = gmac_get_ksettings,
	.set_link_ksettings = gmac_set_ksettings,
	.nway_reset	= gmac_nway_reset,
	.get_pauseparam	= gmac_get_pauseparam,
	.get_ringparam	= gmac_get_ringparam,
	.set_ringparam	= gmac_set_ringparam,
	.get_coalesce	= gmac_get_coalesce,
	.set_coalesce	= gmac_set_coalesce,
	.get_msglevel	= gmac_get_msglevel,
	.set_msglevel	= gmac_set_msglevel,
	.get_drvinfo	= gmac_get_drvinfo,
};
2204
/* Threaded half of the port interrupt: runs after gemini_port_irq()
 * has masked the free-queue-empty interrupt. Refills the software
 * free queue, then ACKs the (edge-latched) status bit and unmasks the
 * interrupt again under irq_lock so the hard IRQ handler cannot race
 * the enable-register read-modify-write.
 */
static irqreturn_t gemini_port_irq_thread(int irq, void *data)
{
	unsigned long irqmask = SWFQ_EMPTY_INT_BIT;
	struct gemini_ethernet_port *port = data;
	struct gemini_ethernet *geth;
	unsigned long flags;

	geth = port->geth;
	/* The queue is half empty so refill it */
	geth_fill_freeq(geth, true);

	spin_lock_irqsave(&geth->irq_lock, flags);
	/* ACK queue interrupt */
	writel(irqmask, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
	/* Enable queue interrupt again */
	irqmask |= readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
	writel(irqmask, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
	spin_unlock_irqrestore(&geth->irq_lock, flags);

	return IRQ_HANDLED;
}
2226
/* Hard IRQ handler for a port (IRQF_SHARED between both ports).
 * Only handles the software-free-queue-empty condition: masks the
 * interrupt (plus RX overrun, which we cannot act on here) and wakes
 * the threaded handler to do the actual refill. Other status bits are
 * left for the sibling port's handler, hence IRQ_NONE by default.
 */
static irqreturn_t gemini_port_irq(int irq, void *data)
{
	struct gemini_ethernet_port *port = data;
	struct gemini_ethernet *geth;
	irqreturn_t ret = IRQ_NONE;
	u32 val, en;

	geth = port->geth;
	spin_lock(&geth->irq_lock);

	val = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
	en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);

	if (val & en & SWFQ_EMPTY_INT_BIT) {
		/* Disable the queue empty interrupt while we work on
		 * processing the queue. Also disable overrun interrupts
		 * as there is not much we can do about it here.
		 */
		en &= ~(SWFQ_EMPTY_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT
			| GMAC1_RX_OVERRUN_INT_BIT);
		writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);
		ret = IRQ_WAKE_THREAD;
	}

	spin_unlock(&geth->irq_lock);

	return ret;
}
2255
/* Tear down one port: unregister the netdev (if registration ever
 * succeeded), gate its clock and release the free queue.
 * NOTE(review): the free queue is shared between both ports but is
 * cleaned up when either port goes away — confirm geth_cleanup_freeq()
 * is refcounted or otherwise safe against the sibling port.
 */
static void gemini_port_remove(struct gemini_ethernet_port *port)
{
	if (port->netdev)
		unregister_netdev(port->netdev);
	clk_disable_unprepare(port->pclk);
	geth_cleanup_freeq(port->geth);
}
2263
/* Put the global ethernet block into a known quiescent state: mask
 * every interrupt source, route the interrupt groups between the two
 * ports, clear any latched status and zero the free-queue registers.
 * Called from probe and again from remove to silence the hardware.
 */
static void gemini_ethernet_init(struct gemini_ethernet *geth)
{
	/* Mask all interrupt sources before touching routing/status */
	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG);
	writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG);

	/* Interrupt config:
	 *
	 *	GMAC0 intr bits ------> int0 ----> eth0
	 *	GMAC1 intr bits ------> int1 ----> eth1
	 *	TOE intr -------------> int1 ----> eth1
	 *	Classification Intr --> int0 ----> eth0
	 *	Default Q0 -----------> int0 ----> eth0
	 *	Default Q1 -----------> int1 ----> eth1
	 *	FreeQ intr -----------> int1 ----> eth1
	 */
	writel(0xCCFC0FC0, geth->base + GLOBAL_INTERRUPT_SELECT_0_REG);
	writel(0x00F00002, geth->base + GLOBAL_INTERRUPT_SELECT_1_REG);
	writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_2_REG);
	writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_3_REG);
	writel(0xFF000003, geth->base + GLOBAL_INTERRUPT_SELECT_4_REG);

	/* edge-triggered interrupts packed to level-triggered one... */
	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG);
	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_2_REG);
	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_3_REG);
	writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);

	/* Set up queue */
	writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG);
	writel(0, geth->base + GLOBAL_HW_FREEQ_BASE_SIZE_REG);
	writel(0, geth->base + GLOBAL_SWFQ_RWPTR_REG);
	writel(0, geth->base + GLOBAL_HWFQ_RWPTR_REG);

	geth->freeq_frag_order = DEFAULT_RX_BUF_ORDER;
	/* This makes the queue resize on probe() so that we
	 * set up and enable the queue IRQ. FIXME: fragile.
	 */
	geth->freeq_order = 1;
}
2307
/* Capture the MAC address the boot firmware may have programmed into
 * the station-address registers, so probe can reuse it if valid.
 * NOTE(review): cpu_to_le32() on a readl() result assumes the address
 * bytes are laid out little-endian in the registers — confirm on a
 * big-endian build.
 */
static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port)
{
	port->mac_addr[0] =
		cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD0));
	port->mac_addr[1] =
		cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD1));
	port->mac_addr[2] =
		cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD2));
}
2317
2318static int gemini_ethernet_port_probe(struct platform_device *pdev)
2319{
2320 char *port_names[2] = { "ethernet0", "ethernet1" };
2321 struct gemini_ethernet_port *port;
2322 struct device *dev = &pdev->dev;
2323 struct gemini_ethernet *geth;
2324 struct net_device *netdev;
2325 struct resource *gmacres;
2326 struct resource *dmares;
2327 struct device *parent;
2328 unsigned int id;
2329 int irq;
2330 int ret;
2331
2332 parent = dev->parent;
2333 geth = dev_get_drvdata(parent);
2334
2335 if (!strcmp(dev_name(dev), "60008000.ethernet-port"))
2336 id = 0;
2337 else if (!strcmp(dev_name(dev), "6000c000.ethernet-port"))
2338 id = 1;
2339 else
2340 return -ENODEV;
2341
2342 dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
2343
2344 netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
2345 if (!netdev) {
2346 dev_err(dev, "Can't allocate ethernet device #%d\n", id);
2347 return -ENOMEM;
2348 }
2349
2350 port = netdev_priv(netdev);
2351 SET_NETDEV_DEV(netdev, dev);
2352 port->netdev = netdev;
2353 port->id = id;
2354 port->geth = geth;
2355 port->dev = dev;
2356
2357 /* DMA memory */
2358 dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2359 if (!dmares) {
2360 dev_err(dev, "no DMA resource\n");
2361 return -ENODEV;
2362 }
2363 port->dma_base = devm_ioremap_resource(dev, dmares);
2364 if (IS_ERR(port->dma_base))
2365 return PTR_ERR(port->dma_base);
2366
2367 /* GMAC config memory */
2368 gmacres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2369 if (!gmacres) {
2370 dev_err(dev, "no GMAC resource\n");
2371 return -ENODEV;
2372 }
2373 port->gmac_base = devm_ioremap_resource(dev, gmacres);
2374 if (IS_ERR(port->gmac_base))
2375 return PTR_ERR(port->gmac_base);
2376
2377 /* Interrupt */
2378 irq = platform_get_irq(pdev, 0);
2379 if (irq <= 0) {
2380 dev_err(dev, "no IRQ\n");
2381 return irq ? irq : -ENODEV;
2382 }
2383 port->irq = irq;
2384
2385 /* Clock the port */
2386 port->pclk = devm_clk_get(dev, "PCLK");
2387 if (IS_ERR(port->pclk)) {
2388 dev_err(dev, "no PCLK\n");
2389 return PTR_ERR(port->pclk);
2390 }
2391 ret = clk_prepare_enable(port->pclk);
2392 if (ret)
2393 return ret;
2394
2395 /* Maybe there is a nice ethernet address we should use */
2396 gemini_port_save_mac_addr(port);
2397
2398 /* Reset the port */
2399 port->reset = devm_reset_control_get_exclusive(dev, NULL);
2400 if (IS_ERR(port->reset)) {
2401 dev_err(dev, "no reset\n");
2402 return PTR_ERR(port->reset);
2403 }
2404 reset_control_reset(port->reset);
2405 usleep_range(100, 500);
2406
2407 /* Assign pointer in the main state container */
2408 if (!id)
2409 geth->port0 = port;
2410 else
2411 geth->port1 = port;
2412 platform_set_drvdata(pdev, port);
2413
2414 /* Set up and register the netdev */
2415 netdev->dev_id = port->id;
2416 netdev->irq = irq;
2417 netdev->netdev_ops = &gmac_351x_ops;
2418 netdev->ethtool_ops = &gmac_351x_ethtool_ops;
2419
2420 spin_lock_init(&port->config_lock);
2421 gmac_clear_hw_stats(netdev);
2422
2423 netdev->hw_features = GMAC_OFFLOAD_FEATURES;
2424 netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
2425
2426 port->freeq_refill = 0;
2427 netif_napi_add(netdev, &port->napi, gmac_napi_poll,
2428 DEFAULT_NAPI_WEIGHT);
2429
2430 if (is_valid_ether_addr((void *)port->mac_addr)) {
2431 memcpy(netdev->dev_addr, port->mac_addr, ETH_ALEN);
2432 } else {
2433 dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n",
2434 port->mac_addr[0], port->mac_addr[1],
2435 port->mac_addr[2]);
2436 dev_info(dev, "using a random ethernet address\n");
2437 random_ether_addr(netdev->dev_addr);
2438 }
2439 gmac_write_mac_address(netdev);
2440
2441 ret = devm_request_threaded_irq(port->dev,
2442 port->irq,
2443 gemini_port_irq,
2444 gemini_port_irq_thread,
2445 IRQF_SHARED,
2446 port_names[port->id],
2447 port);
2448 if (ret)
2449 return ret;
2450
2451 ret = register_netdev(netdev);
2452 if (!ret) {
2453 netdev_info(netdev,
2454 "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
2455 port->irq, &dmares->start,
2456 &gmacres->start);
2457 ret = gmac_setup_phy(netdev);
2458 if (ret)
2459 netdev_info(netdev,
2460 "PHY init failed, deferring to ifup time\n");
2461 return 0;
2462 }
2463
2464 port->netdev = NULL;
2465 free_netdev(netdev);
2466 return ret;
2467}
2468
/* platform remove callback for a port child device */
static int gemini_ethernet_port_remove(struct platform_device *pdev)
{
	struct gemini_ethernet_port *port;

	port = platform_get_drvdata(pdev);
	gemini_port_remove(port);

	return 0;
}
2476
/* Device-tree match table for the per-port child devices */
static const struct of_device_id gemini_ethernet_port_of_match[] = {
	{
		.compatible = "cortina,gemini-ethernet-port",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gemini_ethernet_port_of_match);
2484
/* Platform driver for the two ethernet-port child devices */
static struct platform_driver gemini_ethernet_port_driver = {
	.driver = {
		.name = "gemini-ethernet-port",
		.of_match_table = of_match_ptr(gemini_ethernet_port_of_match),
	},
	.probe = gemini_ethernet_port_probe,
	.remove = gemini_ethernet_port_remove,
};
2493
/* Probe the parent (global) ethernet block: map the shared register
 * window, wait for the hardware to come out of reset, quiesce it and
 * spawn the two ethernet-port child devices from the device tree.
 */
static int gemini_ethernet_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct gemini_ethernet *geth;
	unsigned int retry = 5;
	struct resource *res;
	u32 val;

	/* Global registers */
	geth = devm_kzalloc(dev, sizeof(*geth), GFP_KERNEL);
	if (!geth)
		return -ENOMEM;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	geth->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(geth->base))
		return PTR_ERR(geth->base);
	geth->dev = dev;

	/* Wait for ports to stabilize: the version register reads as
	 * zero until the block is operational (up to 5 polls, 2us apart).
	 */
	do {
		udelay(2);
		val = readl(geth->base + GLOBAL_TOE_VERSION_REG);
		barrier();
	} while (!val && --retry);
	if (!retry) {
		dev_err(dev, "failed to reset ethernet\n");
		return -EIO;
	}
	dev_info(dev, "Ethernet device ID: 0x%03x, revision 0x%01x\n",
		 (val >> 4) & 0xFFFU, val & 0xFU);

	spin_lock_init(&geth->irq_lock);
	spin_lock_init(&geth->freeq_lock);
	gemini_ethernet_init(geth);

	/* The children will use this */
	platform_set_drvdata(pdev, geth);

	/* Spawn child devices for the two ports */
	return devm_of_platform_populate(dev);
}
2537
/* Remove the parent block: re-run the init sequence to mask all
 * interrupts and silence the hardware, then free the shared free
 * queue. Child port devices are removed by the driver core.
 */
static int gemini_ethernet_remove(struct platform_device *pdev)
{
	struct gemini_ethernet *geth = platform_get_drvdata(pdev);

	gemini_ethernet_init(geth);
	geth_cleanup_freeq(geth);

	return 0;
}
2547
/* Device-tree match table for the parent (global) ethernet block */
static const struct of_device_id gemini_ethernet_of_match[] = {
	{
		.compatible = "cortina,gemini-ethernet",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gemini_ethernet_of_match);
2555
/* Platform driver for the parent (global) ethernet block */
static struct platform_driver gemini_ethernet_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(gemini_ethernet_of_match),
	},
	.probe = gemini_ethernet_probe,
	.remove = gemini_ethernet_remove,
};
2564
2565static int __init gemini_ethernet_module_init(void)
2566{
2567 int ret;
2568
2569 ret = platform_driver_register(&gemini_ethernet_port_driver);
2570 if (ret)
2571 return ret;
2572
2573 ret = platform_driver_register(&gemini_ethernet_driver);
2574 if (ret) {
2575 platform_driver_unregister(&gemini_ethernet_port_driver);
2576 return ret;
2577 }
2578
2579 return 0;
2580}
2581module_init(gemini_ethernet_module_init);
2582
/* Unregister the drivers in reverse order of registration */
static void __exit gemini_ethernet_module_exit(void)
{
	platform_driver_unregister(&gemini_ethernet_driver);
	platform_driver_unregister(&gemini_ethernet_port_driver);
}
2589
2590MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
2591MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver");
2592MODULE_LICENSE("GPL");
2593MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
new file mode 100644
index 000000000000..0b12f89bf89a
--- /dev/null
+++ b/drivers/net/ethernet/cortina/gemini.h
@@ -0,0 +1,958 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Register definitions for Gemini GMAC Ethernet device driver
3 *
4 * Copyright (C) 2006 Storlink, Corp.
5 * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
6 * Copyright (C) 2010 Michał Mirosław <mirq-linux@rere.qmqm.pl>
 * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
8 */
9#ifndef _GEMINI_ETHERNET_H
10#define _GEMINI_ETHERNET_H
11
12#include <linux/bitops.h>
13
14/* Base Registers */
15#define TOE_NONTOE_QUE_HDR_BASE 0x2000
16#define TOE_TOE_QUE_HDR_BASE 0x3000
17
18/* Queue ID */
19#define TOE_SW_FREE_QID 0x00
20#define TOE_HW_FREE_QID 0x01
21#define TOE_GMAC0_SW_TXQ0_QID 0x02
22#define TOE_GMAC0_SW_TXQ1_QID 0x03
23#define TOE_GMAC0_SW_TXQ2_QID 0x04
24#define TOE_GMAC0_SW_TXQ3_QID 0x05
25#define TOE_GMAC0_SW_TXQ4_QID 0x06
26#define TOE_GMAC0_SW_TXQ5_QID 0x07
27#define TOE_GMAC0_HW_TXQ0_QID 0x08
28#define TOE_GMAC0_HW_TXQ1_QID 0x09
29#define TOE_GMAC0_HW_TXQ2_QID 0x0A
30#define TOE_GMAC0_HW_TXQ3_QID 0x0B
31#define TOE_GMAC1_SW_TXQ0_QID 0x12
32#define TOE_GMAC1_SW_TXQ1_QID 0x13
33#define TOE_GMAC1_SW_TXQ2_QID 0x14
34#define TOE_GMAC1_SW_TXQ3_QID 0x15
35#define TOE_GMAC1_SW_TXQ4_QID 0x16
36#define TOE_GMAC1_SW_TXQ5_QID 0x17
37#define TOE_GMAC1_HW_TXQ0_QID 0x18
38#define TOE_GMAC1_HW_TXQ1_QID 0x19
39#define TOE_GMAC1_HW_TXQ2_QID 0x1A
40#define TOE_GMAC1_HW_TXQ3_QID 0x1B
41#define TOE_GMAC0_DEFAULT_QID 0x20
42#define TOE_GMAC1_DEFAULT_QID 0x21
/* Parenthesize the macro argument: without it, an expression argument
 * such as (a | b) associates as (0x22 + a) | b.
 */
#define TOE_CLASSIFICATION_QID(x)	(0x22 + (x))	/* 0x22 ~ 0x2F */
#define TOE_TOE_QID(x)			(0x40 + (x))	/* 0x40 ~ 0x7F */
45
46/* TOE DMA Queue Size should be 2^n, n = 6...12
47 * TOE DMA Queues are the following queue types:
48 * SW Free Queue, HW Free Queue,
49 * GMAC 0/1 SW TX Q0-5, and GMAC 0/1 HW TX Q0-5
50 * The base address and descriptor number are configured at
51 * DMA Queues Descriptor Ring Base Address/Size Register (offset 0x0004)
52 */
53#define GET_WPTR(addr) readw((addr) + 2)
54#define GET_RPTR(addr) readw((addr))
55#define SET_WPTR(addr, data) writew((data), (addr) + 2)
56#define SET_RPTR(addr, data) writew((data), (addr))
57#define __RWPTR_NEXT(x, mask) (((unsigned int)(x) + 1) & (mask))
58#define __RWPTR_PREV(x, mask) (((unsigned int)(x) - 1) & (mask))
59#define __RWPTR_DISTANCE(r, w, mask) (((unsigned int)(w) - (r)) & (mask))
60#define __RWPTR_MASK(order) ((1 << (order)) - 1)
61#define RWPTR_NEXT(x, order) __RWPTR_NEXT((x), __RWPTR_MASK((order)))
62#define RWPTR_PREV(x, order) __RWPTR_PREV((x), __RWPTR_MASK((order)))
63#define RWPTR_DISTANCE(r, w, order) __RWPTR_DISTANCE((r), (w), \
64 __RWPTR_MASK((order)))
65
66/* Global registers */
67#define GLOBAL_TOE_VERSION_REG 0x0000
68#define GLOBAL_SW_FREEQ_BASE_SIZE_REG 0x0004
69#define GLOBAL_HW_FREEQ_BASE_SIZE_REG 0x0008
70#define GLOBAL_DMA_SKB_SIZE_REG 0x0010
71#define GLOBAL_SWFQ_RWPTR_REG 0x0014
72#define GLOBAL_HWFQ_RWPTR_REG 0x0018
73#define GLOBAL_INTERRUPT_STATUS_0_REG 0x0020
74#define GLOBAL_INTERRUPT_ENABLE_0_REG 0x0024
75#define GLOBAL_INTERRUPT_SELECT_0_REG 0x0028
76#define GLOBAL_INTERRUPT_STATUS_1_REG 0x0030
77#define GLOBAL_INTERRUPT_ENABLE_1_REG 0x0034
78#define GLOBAL_INTERRUPT_SELECT_1_REG 0x0038
79#define GLOBAL_INTERRUPT_STATUS_2_REG 0x0040
80#define GLOBAL_INTERRUPT_ENABLE_2_REG 0x0044
81#define GLOBAL_INTERRUPT_SELECT_2_REG 0x0048
82#define GLOBAL_INTERRUPT_STATUS_3_REG 0x0050
83#define GLOBAL_INTERRUPT_ENABLE_3_REG 0x0054
84#define GLOBAL_INTERRUPT_SELECT_3_REG 0x0058
85#define GLOBAL_INTERRUPT_STATUS_4_REG 0x0060
86#define GLOBAL_INTERRUPT_ENABLE_4_REG 0x0064
87#define GLOBAL_INTERRUPT_SELECT_4_REG 0x0068
88#define GLOBAL_HASH_TABLE_BASE_REG 0x006C
89#define GLOBAL_QUEUE_THRESHOLD_REG 0x0070
90
91/* GMAC 0/1 DMA/TOE register */
92#define GMAC_DMA_CTRL_REG 0x0000
93#define GMAC_TX_WEIGHTING_CTRL_0_REG 0x0004
94#define GMAC_TX_WEIGHTING_CTRL_1_REG 0x0008
95#define GMAC_SW_TX_QUEUE0_PTR_REG 0x000C
96#define GMAC_SW_TX_QUEUE1_PTR_REG 0x0010
97#define GMAC_SW_TX_QUEUE2_PTR_REG 0x0014
98#define GMAC_SW_TX_QUEUE3_PTR_REG 0x0018
99#define GMAC_SW_TX_QUEUE4_PTR_REG 0x001C
100#define GMAC_SW_TX_QUEUE5_PTR_REG 0x0020
101#define GMAC_SW_TX_QUEUE_PTR_REG(i) (GMAC_SW_TX_QUEUE0_PTR_REG + 4 * (i))
102#define GMAC_HW_TX_QUEUE0_PTR_REG 0x0024
103#define GMAC_HW_TX_QUEUE1_PTR_REG 0x0028
104#define GMAC_HW_TX_QUEUE2_PTR_REG 0x002C
105#define GMAC_HW_TX_QUEUE3_PTR_REG 0x0030
106#define GMAC_HW_TX_QUEUE_PTR_REG(i) (GMAC_HW_TX_QUEUE0_PTR_REG + 4 * (i))
107#define GMAC_DMA_TX_FIRST_DESC_REG 0x0038
108#define GMAC_DMA_TX_CURR_DESC_REG 0x003C
109#define GMAC_DMA_TX_DESC_WORD0_REG 0x0040
110#define GMAC_DMA_TX_DESC_WORD1_REG 0x0044
111#define GMAC_DMA_TX_DESC_WORD2_REG 0x0048
112#define GMAC_DMA_TX_DESC_WORD3_REG 0x004C
113#define GMAC_SW_TX_QUEUE_BASE_REG 0x0050
114#define GMAC_HW_TX_QUEUE_BASE_REG 0x0054
115#define GMAC_DMA_RX_FIRST_DESC_REG 0x0058
116#define GMAC_DMA_RX_CURR_DESC_REG 0x005C
117#define GMAC_DMA_RX_DESC_WORD0_REG 0x0060
118#define GMAC_DMA_RX_DESC_WORD1_REG 0x0064
119#define GMAC_DMA_RX_DESC_WORD2_REG 0x0068
120#define GMAC_DMA_RX_DESC_WORD3_REG 0x006C
121#define GMAC_HASH_ENGINE_REG0 0x0070
122#define GMAC_HASH_ENGINE_REG1 0x0074
123/* matching rule 0 Control register 0 */
124#define GMAC_MR0CR0 0x0078
125#define GMAC_MR0CR1 0x007C
126#define GMAC_MR0CR2 0x0080
127#define GMAC_MR1CR0 0x0084
128#define GMAC_MR1CR1 0x0088
129#define GMAC_MR1CR2 0x008C
130#define GMAC_MR2CR0 0x0090
131#define GMAC_MR2CR1 0x0094
132#define GMAC_MR2CR2 0x0098
133#define GMAC_MR3CR0 0x009C
134#define GMAC_MR3CR1 0x00A0
135#define GMAC_MR3CR2 0x00A4
136/* Support Protocol Register 0 */
137#define GMAC_SPR0 0x00A8
138#define GMAC_SPR1 0x00AC
139#define GMAC_SPR2 0x00B0
140#define GMAC_SPR3 0x00B4
141#define GMAC_SPR4 0x00B8
142#define GMAC_SPR5 0x00BC
143#define GMAC_SPR6 0x00C0
144#define GMAC_SPR7 0x00C4
145/* GMAC Hash/Rx/Tx AHB Weighting register */
146#define GMAC_AHB_WEIGHT_REG 0x00C8
147
148/* TOE GMAC 0/1 register */
149#define GMAC_STA_ADD0 0x0000
150#define GMAC_STA_ADD1 0x0004
151#define GMAC_STA_ADD2 0x0008
152#define GMAC_RX_FLTR 0x000c
153#define GMAC_MCAST_FIL0 0x0010
154#define GMAC_MCAST_FIL1 0x0014
155#define GMAC_CONFIG0 0x0018
156#define GMAC_CONFIG1 0x001c
157#define GMAC_CONFIG2 0x0020
158#define GMAC_CONFIG3 0x0024
159#define GMAC_RESERVED 0x0028
160#define GMAC_STATUS 0x002c
161#define GMAC_IN_DISCARDS 0x0030
162#define GMAC_IN_ERRORS 0x0034
163#define GMAC_IN_MCAST 0x0038
164#define GMAC_IN_BCAST 0x003c
165#define GMAC_IN_MAC1 0x0040 /* for STA 1 MAC Address */
166#define GMAC_IN_MAC2 0x0044 /* for STA 2 MAC Address */
167
168#define RX_STATS_NUM 6
169
170/* DMA Queues description Ring Base Address/Size Register (offset 0x0004) */
/* DMA Queues description Ring Base Address/Size Register (offset 0x0004):
 * base address and size share one 32-bit word (see DMA_Q_BASE_MASK below).
 */
union dma_q_base_size {
	unsigned int bits32;
	unsigned int base_size;
};
175
176#define DMA_Q_BASE_MASK (~0x0f)
177
178/* DMA SKB Buffer register (offset 0x0008) */
/* DMA SKB Buffer register (offset 0x0008): buffer sizes handed to the
 * software and hardware free queues.
 */
union dma_skb_size {
	unsigned int bits32;
	struct bit_0008 {
		unsigned int sw_skb_size : 16;	/* SW Free poll SKB Size */
		unsigned int hw_skb_size : 16;	/* HW Free poll SKB Size */
	} bits;
};
186
187/* DMA SW Free Queue Read/Write Pointer Register (offset 0x000c) */
/* DMA SW Free Queue Read/Write Pointer Register (offset 0x000c):
 * hardware consumes at rptr, software produces at wptr.
 */
union dma_rwptr {
	unsigned int bits32;
	struct bit_000c {
		unsigned int rptr : 16;		/* Read Ptr, RO */
		unsigned int wptr : 16;		/* Write Ptr, RW */
	} bits;
};
195
196/* Interrupt Status Register 0 (offset 0x0020)
197 * Interrupt Mask Register 0 (offset 0x0024)
198 * Interrupt Select Register 0 (offset 0x0028)
199 */
200#define GMAC1_TXDERR_INT_BIT BIT(31)
201#define GMAC1_TXPERR_INT_BIT BIT(30)
202#define GMAC0_TXDERR_INT_BIT BIT(29)
203#define GMAC0_TXPERR_INT_BIT BIT(28)
204#define GMAC1_RXDERR_INT_BIT BIT(27)
205#define GMAC1_RXPERR_INT_BIT BIT(26)
206#define GMAC0_RXDERR_INT_BIT BIT(25)
207#define GMAC0_RXPERR_INT_BIT BIT(24)
208#define GMAC1_SWTQ15_FIN_INT_BIT BIT(23)
209#define GMAC1_SWTQ14_FIN_INT_BIT BIT(22)
210#define GMAC1_SWTQ13_FIN_INT_BIT BIT(21)
211#define GMAC1_SWTQ12_FIN_INT_BIT BIT(20)
212#define GMAC1_SWTQ11_FIN_INT_BIT BIT(19)
213#define GMAC1_SWTQ10_FIN_INT_BIT BIT(18)
214#define GMAC0_SWTQ05_FIN_INT_BIT BIT(17)
215#define GMAC0_SWTQ04_FIN_INT_BIT BIT(16)
216#define GMAC0_SWTQ03_FIN_INT_BIT BIT(15)
217#define GMAC0_SWTQ02_FIN_INT_BIT BIT(14)
218#define GMAC0_SWTQ01_FIN_INT_BIT BIT(13)
219#define GMAC0_SWTQ00_FIN_INT_BIT BIT(12)
220#define GMAC1_SWTQ15_EOF_INT_BIT BIT(11)
221#define GMAC1_SWTQ14_EOF_INT_BIT BIT(10)
222#define GMAC1_SWTQ13_EOF_INT_BIT BIT(9)
223#define GMAC1_SWTQ12_EOF_INT_BIT BIT(8)
224#define GMAC1_SWTQ11_EOF_INT_BIT BIT(7)
225#define GMAC1_SWTQ10_EOF_INT_BIT BIT(6)
226#define GMAC0_SWTQ05_EOF_INT_BIT BIT(5)
227#define GMAC0_SWTQ04_EOF_INT_BIT BIT(4)
228#define GMAC0_SWTQ03_EOF_INT_BIT BIT(3)
229#define GMAC0_SWTQ02_EOF_INT_BIT BIT(2)
230#define GMAC0_SWTQ01_EOF_INT_BIT BIT(1)
231#define GMAC0_SWTQ00_EOF_INT_BIT BIT(0)
232
233/* Interrupt Status Register 1 (offset 0x0030)
234 * Interrupt Mask Register 1 (offset 0x0034)
235 * Interrupt Select Register 1 (offset 0x0038)
236 */
237#define TOE_IQ3_FULL_INT_BIT BIT(31)
238#define TOE_IQ2_FULL_INT_BIT BIT(30)
239#define TOE_IQ1_FULL_INT_BIT BIT(29)
240#define TOE_IQ0_FULL_INT_BIT BIT(28)
241#define TOE_IQ3_INT_BIT BIT(27)
242#define TOE_IQ2_INT_BIT BIT(26)
243#define TOE_IQ1_INT_BIT BIT(25)
244#define TOE_IQ0_INT_BIT BIT(24)
245#define GMAC1_HWTQ13_EOF_INT_BIT BIT(23)
246#define GMAC1_HWTQ12_EOF_INT_BIT BIT(22)
247#define GMAC1_HWTQ11_EOF_INT_BIT BIT(21)
248#define GMAC1_HWTQ10_EOF_INT_BIT BIT(20)
249#define GMAC0_HWTQ03_EOF_INT_BIT BIT(19)
250#define GMAC0_HWTQ02_EOF_INT_BIT BIT(18)
251#define GMAC0_HWTQ01_EOF_INT_BIT BIT(17)
252#define GMAC0_HWTQ00_EOF_INT_BIT BIT(16)
/* Parenthesize the argument so e.g. CLASS_RX_INT_BIT(a | b) does not
 * mis-associate as BIT(a | (b + 2)).
 */
#define CLASS_RX_INT_BIT(x)		BIT((x) + 2)
254#define DEFAULT_Q1_INT_BIT BIT(1)
255#define DEFAULT_Q0_INT_BIT BIT(0)
256
257#define TOE_IQ_INT_BITS (TOE_IQ0_INT_BIT | TOE_IQ1_INT_BIT | \
258 TOE_IQ2_INT_BIT | TOE_IQ3_INT_BIT)
259#define TOE_IQ_FULL_BITS (TOE_IQ0_FULL_INT_BIT | TOE_IQ1_FULL_INT_BIT | \
260 TOE_IQ2_FULL_INT_BIT | TOE_IQ3_FULL_INT_BIT)
261#define TOE_IQ_ALL_BITS (TOE_IQ_INT_BITS | TOE_IQ_FULL_BITS)
262#define TOE_CLASS_RX_INT_BITS 0xfffc
263
264/* Interrupt Status Register 2 (offset 0x0040)
265 * Interrupt Mask Register 2 (offset 0x0044)
266 * Interrupt Select Register 2 (offset 0x0048)
267 */
268#define TOE_QL_FULL_INT_BIT(x) BIT(x)
269
270/* Interrupt Status Register 3 (offset 0x0050)
271 * Interrupt Mask Register 3 (offset 0x0054)
272 * Interrupt Select Register 3 (offset 0x0058)
273 */
/* Parenthesize the argument so an expression argument does not bind to
 * "- 32" with the wrong precedence.
 */
#define TOE_QH_FULL_INT_BIT(x)		BIT((x) - 32)
275
276/* Interrupt Status Register 4 (offset 0x0060)
277 * Interrupt Mask Register 4 (offset 0x0064)
278 * Interrupt Select Register 4 (offset 0x0068)
279 */
280#define GMAC1_RESERVED_INT_BIT BIT(31)
281#define GMAC1_MIB_INT_BIT BIT(30)
282#define GMAC1_RX_PAUSE_ON_INT_BIT BIT(29)
283#define GMAC1_TX_PAUSE_ON_INT_BIT BIT(28)
284#define GMAC1_RX_PAUSE_OFF_INT_BIT BIT(27)
285#define GMAC1_TX_PAUSE_OFF_INT_BIT BIT(26)
286#define GMAC1_RX_OVERRUN_INT_BIT BIT(25)
287#define GMAC1_STATUS_CHANGE_INT_BIT BIT(24)
288#define GMAC0_RESERVED_INT_BIT BIT(23)
289#define GMAC0_MIB_INT_BIT BIT(22)
290#define GMAC0_RX_PAUSE_ON_INT_BIT BIT(21)
291#define GMAC0_TX_PAUSE_ON_INT_BIT BIT(20)
292#define GMAC0_RX_PAUSE_OFF_INT_BIT BIT(19)
293#define GMAC0_TX_PAUSE_OFF_INT_BIT BIT(18)
294#define GMAC0_RX_OVERRUN_INT_BIT BIT(17)
295#define GMAC0_STATUS_CHANGE_INT_BIT BIT(16)
/* Parenthesize the argument so e.g. (a | b) does not mis-associate
 * with "+ 2".
 */
#define CLASS_RX_FULL_INT_BIT(x)	BIT((x) + 2)
297#define HWFQ_EMPTY_INT_BIT BIT(1)
298#define SWFQ_EMPTY_INT_BIT BIT(0)
299
300#define GMAC0_INT_BITS (GMAC0_RESERVED_INT_BIT | GMAC0_MIB_INT_BIT | \
301 GMAC0_RX_PAUSE_ON_INT_BIT | \
302 GMAC0_TX_PAUSE_ON_INT_BIT | \
303 GMAC0_RX_PAUSE_OFF_INT_BIT | \
304 GMAC0_TX_PAUSE_OFF_INT_BIT | \
305 GMAC0_RX_OVERRUN_INT_BIT | \
306 GMAC0_STATUS_CHANGE_INT_BIT)
307#define GMAC1_INT_BITS (GMAC1_RESERVED_INT_BIT | GMAC1_MIB_INT_BIT | \
308 GMAC1_RX_PAUSE_ON_INT_BIT | \
309 GMAC1_TX_PAUSE_ON_INT_BIT | \
310 GMAC1_RX_PAUSE_OFF_INT_BIT | \
311 GMAC1_TX_PAUSE_OFF_INT_BIT | \
312 GMAC1_RX_OVERRUN_INT_BIT | \
313 GMAC1_STATUS_CHANGE_INT_BIT)
314
315#define CLASS_RX_FULL_INT_BITS 0xfffc
316
317/* GLOBAL_QUEUE_THRESHOLD_REG (offset 0x0070) */
/* GLOBAL_QUEUE_THRESHOLD_REG (offset 0x0070): fill thresholds at which
 * the queue interrupts (e.g. SWFQ_EMPTY_INT_BIT) are raised.
 */
union queue_threshold {
	unsigned int bits32;
	struct bit_0070_2 {
		/* 7:0 Software Free Queue Empty Threshold */
		unsigned int swfq_empty:8;
		/* 15:8 Hardware Free Queue Empty Threshold */
		unsigned int hwfq_empty:8;
		/* 23:16 */
		unsigned int intrq:8;
		/* 31:24 */
		unsigned int toe_class:8;
	} bits;
};
331
332/* GMAC DMA Control Register
333 * GMAC0 offset 0x8000
334 * GMAC1 offset 0xC000
335 */
/* GMAC DMA Control Register
 * GMAC0 offset 0x8000
 * GMAC1 offset 0xC000
 * Master enable bits for the TX/RX DMA engines plus AHB bus/burst
 * configuration.
 */
union gmac_dma_ctrl {
	unsigned int bits32;
	struct bit_8000 {
		/* bit 1:0 Peripheral Bus Width */
		unsigned int td_bus:2;
		/* bit 3:2 TxDMA max burst size for every AHB request */
		unsigned int td_burst_size:2;
		/* bit 7:4 TxDMA protection control */
		unsigned int td_prot:4;
		/* bit 9:8 Peripheral Bus Width */
		unsigned int rd_bus:2;
		/* bit 11:10 DMA max burst size for every AHB request */
		unsigned int rd_burst_size:2;
		/* bit 15:12 DMA Protection Control */
		unsigned int rd_prot:4;
		/* bit 17:16 */
		unsigned int rd_insert_bytes:2;
		/* bit 27:18 */
		unsigned int reserved:10;
		/* bit 28 1: Drop, 0: Accept */
		unsigned int drop_small_ack:1;
		/* bit 29 Loopback TxDMA to RxDMA */
		unsigned int loopback:1;
		/* bit 30 Tx DMA Enable */
		unsigned int td_enable:1;
		/* bit 31 Rx DMA Enable */
		unsigned int rd_enable:1;
	} bits;
};
365
366/* GMAC Tx Weighting Control Register 0
367 * GMAC0 offset 0x8004
368 * GMAC1 offset 0xC004
369 */
/* GMAC Tx Weighting Control Register 0
 * GMAC0 offset 0x8004
 * GMAC1 offset 0xC004
 * Scheduling weights for the four hardware TX queues.
 * NOTE(review): the per-field comments label hw_tq0..hw_tq3 as queues
 * 3..0 (reversed) — verify against the datasheet whether the field
 * names or the comments have the queue numbers right.
 */
union gmac_tx_wcr0 {
	unsigned int bits32;
	struct bit_8004 {
		/* bit 5:0 HW TX Queue 3 */
		unsigned int hw_tq0:6;
		/* bit 11:6 HW TX Queue 2 */
		unsigned int hw_tq1:6;
		/* bit 17:12 HW TX Queue 1 */
		unsigned int hw_tq2:6;
		/* bit 23:18 HW TX Queue 0 */
		unsigned int hw_tq3:6;
		/* bit 31:24 */
		unsigned int reserved:8;
	} bits;
};
385
/* GMAC Tx Weighting Control Register 1
 * GMAC0 offset 0x8008
 * GMAC1 offset 0xC008
 *
 * Scheduling weights for the six software TX queues.
 */
union gmac_tx_wcr1 {
	unsigned int bits32;	/* whole-register access */
	struct bit_8008 {
		/* bit 4:0 SW TX Queue 0 */
		unsigned int sw_tq0:5;
		/* bit 9:5 SW TX Queue 1 */
		unsigned int sw_tq1:5;
		/* bit 14:10 SW TX Queue 2 */
		unsigned int sw_tq2:5;
		/* bit 19:15 SW TX Queue 3 */
		unsigned int sw_tq3:5;
		/* bit 24:20 SW TX Queue 4 */
		unsigned int sw_tq4:5;
		/* bit 29:25 SW TX Queue 5 */
		unsigned int sw_tq5:5;
		/* bit 31:30 */
		unsigned int reserved:2;
	} bits;
};
409
/* GMAC DMA Tx Description Word 0 Register
 * GMAC0 offset 0x8040
 * GMAC1 offset 0xC040
 *
 * Word 0 of a TX DMA descriptor: buffer size going out, and the
 * completion status written back by the hardware.
 */
union gmac_txdesc_0 {
	unsigned int bits32;	/* whole-word access */
	struct bit_8040 {
		/* bit 15:0 Transfer size */
		unsigned int buffer_size:16;
		/* bit 21:16 number of descriptors used for the current frame */
		unsigned int desc_count:6;
		/* bit 22 Tx Status, 1: Successful 0: Failed */
		unsigned int status_tx_ok:1;
		/* bit 28:23 Tx Status, Reserved bits */
		unsigned int status_rvd:6;
		/* bit 29 protocol error during processing this descriptor */
		unsigned int perr:1;
		/* bit 30 data error during processing this descriptor */
		unsigned int derr:1;
		/* bit 31 */
		unsigned int reserved:1;
	} bits;
};
433
/* GMAC DMA Tx Description Word 1 Register
 * GMAC0 offset 0x8044
 * GMAC1 offset 0xC044
 *
 * Word 1 of a TX DMA descriptor: frame length and the per-frame
 * offload-engine (TSS) control flags. The TSS_*_BIT defines below
 * mirror these bitfield positions for raw-word access.
 */
union gmac_txdesc_1 {
	unsigned int bits32;	/* whole-word access */
	struct txdesc_word1 {
		/* bit 15: 0 Tx Frame Byte Count */
		unsigned int byte_count:16;
		/* bit 16 TSS segmentation use MTU setting */
		unsigned int mtu_enable:1;
		/* bit 17 IPV4 Header Checksum Enable */
		unsigned int ip_chksum:1;
		/* bit 18 IPV6 Tx Enable */
		unsigned int ipv6_enable:1;
		/* bit 19 TCP Checksum Enable */
		unsigned int tcp_chksum:1;
		/* bit 20 UDP Checksum Enable */
		unsigned int udp_chksum:1;
		/* bit 21 Bypass HW offload engine */
		unsigned int bypass_tss:1;
		/* bit 22 Don't update IP length field */
		unsigned int ip_fixed_len:1;
		/* bit 31:23 Tx Flag, Reserved */
		unsigned int reserved:9;
	} bits;
};

/* Raw-word equivalents of the txdesc_word1 flag bits above */
#define TSS_IP_FIXED_LEN_BIT	BIT(22)
#define TSS_BYPASS_BIT		BIT(21)
#define TSS_UDP_CHKSUM_BIT	BIT(20)
#define TSS_TCP_CHKSUM_BIT	BIT(19)
#define TSS_IPV6_ENABLE_BIT	BIT(18)
#define TSS_IP_CHKSUM_BIT	BIT(17)
#define TSS_MTU_ENABLE_BIT	BIT(16)

/* All checksum-offload enable bits. NOTE(review): "CHECKUM" is a typo
 * for "CHECKSUM"; the name is kept because callers use it.
 */
#define TSS_CHECKUM_ENABLE	\
	(TSS_IP_CHKSUM_BIT | TSS_IPV6_ENABLE_BIT | \
	 TSS_TCP_CHKSUM_BIT | TSS_UDP_CHKSUM_BIT)
473
/* GMAC DMA Tx Description Word 2 Register
 * GMAC0 offset 0x8048
 * GMAC1 offset 0xC048
 *
 * Word 2 of a TX DMA descriptor: the buffer address.
 */
union gmac_txdesc_2 {
	unsigned int bits32;	/* whole-word access */
	unsigned int buf_adr;	/* TX buffer address */
};
482
/* GMAC DMA Tx Description Word 3 Register
 * GMAC0 offset 0x804C
 * GMAC1 offset 0xC04C
 *
 * Word 3 of a TX DMA descriptor: MTU for segmentation, interrupt
 * enable and the frame linking (SOF/EOF) marker.
 */
union gmac_txdesc_3 {
	unsigned int bits32;	/* whole-word access */
	struct txdesc_word3 {
		/* bit 12: 0 MTU size used for segmentation (the original
		 * comment said "Tx Frame Byte Count", which belongs to
		 * word 1; MTU_SIZE_BIT_MASK below matches this field)
		 */
		unsigned int mtu_size:13;
		/* bit 28:13 */
		unsigned int reserved:16;
		/* bit 29 End of frame interrupt enable */
		unsigned int eofie:1;
		/* bit 31:30 11: only one, 10: first, 01: last, 00: linking */
		unsigned int sof_eof:2;
	} bits;
};

#define SOF_EOF_BIT_MASK	0x3fffffff	/* clears sof_eof (bits 31:30) */
#define SOF_BIT			0x80000000	/* start-of-frame marker */
#define EOF_BIT			0x40000000	/* end-of-frame marker */
#define EOFIE_BIT		BIT(29)		/* end-of-frame IRQ enable */
#define MTU_SIZE_BIT_MASK	0x1fff		/* mtu_size field (bits 12:0) */
506
/* GMAC Tx Descriptor: the four 32-bit words laid out exactly as the
 * hardware expects them in descriptor memory.
 */
struct gmac_txdesc {
	union gmac_txdesc_0 word0;	/* size + completion status */
	union gmac_txdesc_1 word1;	/* byte count + offload flags */
	union gmac_txdesc_2 word2;	/* buffer address */
	union gmac_txdesc_3 word3;	/* MTU + SOF/EOF linking */
};
514
/* GMAC DMA Rx Description Word 0 Register
 * GMAC0 offset 0x8060
 * GMAC1 offset 0xC060
 *
 * Word 0 of an RX DMA descriptor: buffer size plus the frame and
 * checksum status written back by the hardware.
 */
union gmac_rxdesc_0 {
	unsigned int bits32;	/* whole-word access */
	struct bit_8060 {
		/* bit 15:0 buffer size (the original comment repeated the
		 * desc_count text; this field is the size, matching
		 * buffer_size in txdesc word 0)
		 */
		unsigned int buffer_size:16;
		/* bit 21:16 number of descriptors used for the current frame */
		unsigned int desc_count:6;
		/* bit 25:22 Status of rx frame (RX_STATUS_* values; the
		 * original comment said 24:22 but the field is 4 bits wide)
		 */
		unsigned int status:4;
		/* bit 28:26 Check Sum Status (RX_CHKSUM_* values) */
		unsigned int chksum_status:3;
		/* bit 29 protocol error during processing this descriptor */
		unsigned int perr:1;
		/* bit 30 data error during processing this descriptor */
		unsigned int derr:1;
		/* bit 31 TOE/CIS Queue Full dropped packet to default queue */
		unsigned int drop:1;
	} bits;
};
538
/* Single-bit masks into a raw gmac_rxdesc_0 word. The (x) forms select
 * bit x of the chksum_status/status/desc_count fields (base bits 26,
 * 22 and 16 respectively); they build a one-bit mask, not an encoded
 * field value. Arguments are parenthesized so expressions such as
 * "a ? b : c" expand with the intended precedence (the unparenthesized
 * original would have bound "c + 26" first).
 */
#define GMAC_RXDESC_0_T_derr			BIT(30)
#define GMAC_RXDESC_0_T_perr			BIT(29)
#define GMAC_RXDESC_0_T_chksum_status(x)	BIT((x) + 26)
#define GMAC_RXDESC_0_T_status(x)		BIT((x) + 22)
#define GMAC_RXDESC_0_T_desc_count(x)		BIT((x) + 16)
544
/* Values of the rx descriptor chksum_status field (bits 28:26) */
#define RX_CHKSUM_IP_UDP_TCP_OK		0
#define RX_CHKSUM_IP_OK_ONLY		1
#define RX_CHKSUM_NONE			2
#define RX_CHKSUM_IP_ERR_UNKNOWN	4
#define RX_CHKSUM_IP_ERR		5
#define RX_CHKSUM_TCP_UDP_ERR		6
#define RX_CHKSUM_NUM			8

/* Values of the rx descriptor status field (bits 25:22) */
#define RX_STATUS_GOOD_FRAME		0
#define RX_STATUS_TOO_LONG_GOOD_CRC	1
#define RX_STATUS_RUNT_FRAME		2
#define RX_STATUS_SFD_NOT_FOUND		3
#define RX_STATUS_CRC_ERROR		4
#define RX_STATUS_TOO_LONG_BAD_CRC	5
#define RX_STATUS_ALIGNMENT_ERROR	6
#define RX_STATUS_TOO_LONG_BAD_ALIGN	7
#define RX_STATUS_RX_ERR		8
#define RX_STATUS_DA_FILTERED		9
#define RX_STATUS_BUFFER_FULL		10
#define RX_STATUS_NUM			16

/* Classify an RX_STATUS_* value into error-statistics categories */
#define RX_ERROR_LENGTH(s) \
	((s) == RX_STATUS_TOO_LONG_GOOD_CRC || \
	 (s) == RX_STATUS_TOO_LONG_BAD_CRC || \
	 (s) == RX_STATUS_TOO_LONG_BAD_ALIGN)
#define RX_ERROR_OVER(s) \
	((s) == RX_STATUS_BUFFER_FULL)
#define RX_ERROR_CRC(s) \
	((s) == RX_STATUS_CRC_ERROR || \
	 (s) == RX_STATUS_TOO_LONG_BAD_CRC)
#define RX_ERROR_FRAME(s) \
	((s) == RX_STATUS_ALIGNMENT_ERROR || \
	 (s) == RX_STATUS_TOO_LONG_BAD_ALIGN)
/* No FIFO error status is reported by this hardware; always false.
 * (s) is still evaluated exactly once so the macro behaves like its
 * siblings and does not provoke set-but-unused warnings.
 */
#define RX_ERROR_FIFO(s) \
	((void)(s), 0)
580
/* GMAC DMA Rx Description Word 1 Register
 * GMAC0 offset 0x8064
 * GMAC1 offset 0xC064
 *
 * Word 1 of an RX DMA descriptor: received length and a software tag.
 */
union gmac_rxdesc_1 {
	unsigned int bits32;	/* whole-word access */
	struct rxdesc_word1 {
		/* bit 15: 0 Rx Frame Byte Count */
		unsigned int byte_count:16;
		/* bit 31:16 Software ID */
		unsigned int sw_id:16;
	} bits;
};
594
/* GMAC DMA Rx Description Word 2 Register
 * GMAC0 offset 0x8068
 * GMAC1 offset 0xC068
 *
 * Word 2 of an RX DMA descriptor: the buffer address.
 */
union gmac_rxdesc_2 {
	unsigned int bits32;	/* whole-word access */
	unsigned int buf_adr;	/* RX buffer address */
};

/* Values for the DMA-control rd_insert_bytes field: number of padding
 * bytes inserted ahead of the received data.
 */
#define RX_INSERT_NONE		0
#define RX_INSERT_1_BYTE	1
#define RX_INSERT_2_BYTE	2
#define RX_INSERT_3_BYTE	3
608
/* GMAC DMA Rx Description Word 3 Register
 * GMAC0 offset 0x806C
 * GMAC1 offset 0xC06C
 *
 * Word 3 of an RX DMA descriptor: protocol-layer offsets located by
 * the hardware parser, TOE/TCP observations and the SOF/EOF marker.
 */
union gmac_rxdesc_3 {
	unsigned int bits32;	/* whole-word access */
	struct rxdesc_word3 {
		/* bit 7: 0 L3 data offset */
		unsigned int l3_offset:8;
		/* bit 15: 8 L4 data offset */
		unsigned int l4_offset:8;
		/* bit 23: 16 L7 data offset */
		unsigned int l7_offset:8;
		/* bit 24 Duplicated ACK detected */
		unsigned int dup_ack:1;
		/* bit 25 abnormal case found */
		unsigned int abnormal:1;
		/* bit 26 IPV4 option or IPV6 extension header */
		unsigned int option:1;
		/* bit 27 Out of Sequence packet */
		unsigned int out_of_seq:1;
		/* bit 28 Control Flag is present */
		unsigned int ctrl_flag:1;
		/* bit 29 End of frame interrupt enable */
		unsigned int eofie:1;
		/* bit 31:30 11: only one, 10: first, 01: last, 00: linking */
		unsigned int sof_eof:2;
	} bits;
};
638
/* GMAC Rx Descriptor, this is simply fitted over the queue registers:
 * the four 32-bit words in hardware layout order.
 */
struct gmac_rxdesc {
	union gmac_rxdesc_0 word0;	/* size + frame/checksum status */
	union gmac_rxdesc_1 word1;	/* byte count + software ID */
	union gmac_rxdesc_2 word2;	/* buffer address */
	union gmac_rxdesc_3 word3;	/* parser offsets + SOF/EOF linking */
};
646
/* GMAC Matching Rule Control Register 0
 * GMAC0 offset 0x8078
 * GMAC1 offset 0xC078
 *
 * Enable bits selecting which header fields a matching rule compares.
 * NOTE(review): "LABLE" is a typo for "LABEL"; the name is kept
 * because callers use it.
 */
#define MR_L2_BIT		BIT(31)
#define MR_L3_BIT		BIT(30)
#define MR_L4_BIT		BIT(29)
#define MR_L7_BIT		BIT(28)
#define MR_PORT_BIT		BIT(27)
#define MR_PRIORITY_BIT		BIT(26)
#define MR_DA_BIT		BIT(23)
#define MR_SA_BIT		BIT(22)
#define MR_ETHER_TYPE_BIT	BIT(21)
#define MR_VLAN_BIT		BIT(20)
#define MR_PPPOE_BIT		BIT(19)
#define MR_IP_VER_BIT		BIT(15)
#define MR_IP_HDR_LEN_BIT	BIT(14)
#define MR_FLOW_LABLE_BIT	BIT(13)
#define MR_TOS_TRAFFIC_BIT	BIT(12)
#define MR_SPR_BIT(x)		BIT(x)
#define MR_SPR_BITS		0xff	/* all MR_SPR_BIT positions (7:0) */
668
/* GMAC_AHB_WEIGHT registers
 * GMAC0 offset 0x80C8
 * GMAC1 offset 0xC0C8
 *
 * AHB arbitration weights and FIFO thresholds for one MAC.
 */
union gmac_ahb_weight {
	unsigned int bits32;	/* whole-register access */
	struct bit_80C8 {
		/* 4:0 hash-engine arbitration weight */
		unsigned int hash_weight:5;
		/* 9:5 RX DMA arbitration weight */
		unsigned int rx_weight:5;
		/* 14:10 TX DMA arbitration weight */
		unsigned int tx_weight:5;
		/* 19:15 Rx Data Pre Request FIFO Threshold */
		unsigned int pre_req:5;
		/* 24:20 DMA TqCtrl to Start tqDV FIFO Threshold */
		unsigned int tq_dv_threshold:5;
		/* 31:25 */
		unsigned int reserved:7;
	} bits;
};
690
/* GMAC RX FLTR
 * GMAC0 Offset 0xA00C
 * GMAC1 Offset 0xE00C
 *
 * Receive filter: which frame classes the MAC accepts.
 */
union gmac_rx_fltr {
	unsigned int bits32;	/* whole-register access */
	struct bit1_000c {
		/* Enable receive of unicast frames that are sent to STA
		 * address
		 */
		unsigned int unicast:1;
		/* Enable receive of multicast frames that pass multicast
		 * filter
		 */
		unsigned int multicast:1;
		/* Enable receive of broadcast frames */
		unsigned int broadcast:1;
		/* Enable receive of all frames (promiscuous mode) */
		unsigned int promiscuous:1;
		/* Enable receive of all error frames */
		unsigned int error:1;
		unsigned int reserved:27;
	} bits;
};
715
/* GMAC Configuration 0
 * GMAC0 Offset 0xA018
 * GMAC1 Offset 0xE018
 *
 * Main MAC configuration: TX/RX enables, flow control, frame length
 * limit, RGMII mode and RX checksum offload. The CONFIG0_* masks
 * defined after this union address the same bits as raw-word masks.
 */
union gmac_config0 {
	unsigned int bits32;	/* whole-register access */
	struct bit1_0018 {
		/* 0: disable transmit */
		unsigned int dis_tx:1;
		/* 1: disable receive */
		unsigned int dis_rx:1;
		/* 2: transmit data loopback enable */
		unsigned int loop_back:1;
		/* 3: flow control also trigged by Rx queues */
		unsigned int flow_ctrl:1;
		/* 4-7: adjust IFG from 96+/-56 */
		unsigned int adj_ifg:4;
		/* 8-10 maximum receive frame length allowed
		 * (CONFIG0_MAXLEN_* encodings)
		 */
		unsigned int max_len:3;
		/* 11: disable back-off function */
		unsigned int dis_bkoff:1;
		/* 12: disable 16 collisions abort function */
		unsigned int dis_col:1;
		/* 13: speed up timers in simulation */
		unsigned int sim_test:1;
		/* 14: RX flow control enable */
		unsigned int rx_fc_en:1;
		/* 15: TX flow control enable */
		unsigned int tx_fc_en:1;
		/* 16: RGMII in-band status enable */
		unsigned int rgmii_en:1;
		/* 17: IPv4 RX Checksum enable */
		unsigned int ipv4_rx_chksum:1;
		/* 18: IPv6 RX Checksum enable */
		unsigned int ipv6_rx_chksum:1;
		/* 19: Remove Rx VLAN tag */
		unsigned int rx_tag_remove:1;
		/* 20: RGMII edge control -- semantics not visible here */
		unsigned int rgmm_edge:1;
		/* 21: RX clock invert, presumably -- verify */
		unsigned int rxc_inv:1;
		/* 22 */
		unsigned int ipv6_exthdr_order:1;
		/* 23 */
		unsigned int rx_err_detect:1;
		/* 24 */
		unsigned int port0_chk_hwq:1;
		/* 25 */
		unsigned int port1_chk_hwq:1;
		/* 26 */
		unsigned int port0_chk_toeq:1;
		/* 27 */
		unsigned int port1_chk_toeq:1;
		/* 28 */
		unsigned int port0_chk_classq:1;
		/* 29 */
		unsigned int port1_chk_classq:1;
		/* 30, 31 */
		unsigned int reserved:2;
	} bits;
};
777
/* Raw-word masks into gmac_config0 (bit positions match the union
 * above). CONFIG0_FLOW_TX_RX and CONFIG0_FLOW_CTL are intentionally
 * identical aliases for bits 15:14.
 */
#define CONFIG0_TX_RX_DISABLE	(BIT(1) | BIT(0))
#define CONFIG0_RX_CHKSUM	(BIT(18) | BIT(17))	/* IPv6 | IPv4 */
#define CONFIG0_FLOW_RX		BIT(14)
#define CONFIG0_FLOW_TX		BIT(15)
#define CONFIG0_FLOW_TX_RX	(BIT(14) | BIT(15))
#define CONFIG0_FLOW_CTL	(BIT(14) | BIT(15))

/* Encodings of the 3-bit max_len field (maximum RX frame length) */
#define CONFIG0_MAXLEN_SHIFT	8
#define CONFIG0_MAXLEN_MASK	(7 << CONFIG0_MAXLEN_SHIFT)
#define CONFIG0_MAXLEN_1536	0
#define CONFIG0_MAXLEN_1518	1
#define CONFIG0_MAXLEN_1522	2
#define CONFIG0_MAXLEN_1542	3
#define CONFIG0_MAXLEN_9k	4	/* 9212 */
#define CONFIG0_MAXLEN_10k	5	/* 10236 */
#define CONFIG0_MAXLEN_1518__6	6	/* duplicate 1518 encoding */
#define CONFIG0_MAXLEN_1518__7	7	/* duplicate 1518 encoding */
795
/* GMAC Configuration 1
 * GMAC0 Offset 0xA01C
 * GMAC1 Offset 0xE01C
 *
 * Flow-control assertion/deassertion thresholds for this MAC.
 */
union gmac_config1 {
	unsigned int bits32;	/* whole-register access */
	struct bit1_001c {
		/* Flow control set threshold */
		unsigned int set_threshold:8;
		/* Flow control release threshold */
		unsigned int rel_threshold:8;
		unsigned int reserved:16;
	} bits;
};

/* Valid ranges for the gmac_config1 thresholds */
#define GMAC_FLOWCTRL_SET_MAX		32
#define GMAC_FLOWCTRL_SET_MIN		0
#define GMAC_FLOWCTRL_RELEASE_MAX	32
#define GMAC_FLOWCTRL_RELEASE_MIN	0
815
/* GMAC Configuration 2
 * GMAC0 Offset 0xA020
 * GMAC1 Offset 0xE020
 *
 * A second pair of flow-control thresholds, 16 bits wide each.
 */
union gmac_config2 {
	unsigned int bits32;	/* whole-register access */
	struct bit1_0020 {
		/* Flow control set threshold */
		unsigned int set_threshold:16;
		/* Flow control release threshold */
		unsigned int rel_threshold:16;
	} bits;
};
829
/* GMAC Configuration 3
 * GMAC0 Offset 0xA024
 * GMAC1 Offset 0xE024
 *
 * A third pair of flow-control thresholds, same layout as config 2.
 */
union gmac_config3 {
	unsigned int bits32;	/* whole-register access */
	struct bit1_0024 {
		/* Flow control set threshold */
		unsigned int set_threshold:16;
		/* Flow control release threshold */
		unsigned int rel_threshold:16;
	} bits;
};
843
/* GMAC STATUS
 * GMAC0 Offset 0xA02C
 * GMAC1 Offset 0xE02C
 *
 * Link status readback: the GMAC_SPEED_* and GMAC_PHY_* values below
 * are the encodings of the speed and mii_rmii fields.
 */
union gmac_status {
	unsigned int bits32;	/* whole-register access */
	struct bit1_002c {
		/* Link status */
		unsigned int link:1;
		/* Link speed(00->2.5M 01->25M 10->125M) -- these are the
		 * MII clock rates for 10/100/1000 Mbit respectively
		 */
		unsigned int speed:2;
		/* Duplex mode */
		unsigned int duplex:1;
		unsigned int reserved_1:1;
		/* PHY interface type (GMAC_PHY_* values) */
		unsigned int mii_rmii:2;
		unsigned int reserved_2:25;
	} bits;
};

/* Encodings of the status "speed" field */
#define GMAC_SPEED_10			0
#define GMAC_SPEED_100			1
#define GMAC_SPEED_1000			2

/* Encodings of the status "mii_rmii" field */
#define GMAC_PHY_MII			0
#define GMAC_PHY_GMII			1
#define GMAC_PHY_RGMII_100_10		2
#define GMAC_PHY_RGMII_1000		3
872
873/* Queue Header
874 * (1) TOE Queue Header
875 * (2) Non-TOE Queue Header
876 * (3) Interrupt Queue Header
877 *
878 * memory Layout
879 * TOE Queue Header
880 * 0x60003000 +---------------------------+ 0x0000
881 * | TOE Queue 0 Header |
882 * | 8 * 4 Bytes |
883 * +---------------------------+ 0x0020
884 * | TOE Queue 1 Header |
885 * | 8 * 4 Bytes |
886 * +---------------------------+ 0x0040
887 * | ...... |
888 * | |
889 * +---------------------------+
890 *
891 * Non TOE Queue Header
892 * 0x60002000 +---------------------------+ 0x0000
893 * | Default Queue 0 Header |
894 * | 2 * 4 Bytes |
895 * +---------------------------+ 0x0008
896 * | Default Queue 1 Header |
897 * | 2 * 4 Bytes |
898 * +---------------------------+ 0x0010
899 * | Classification Queue 0 |
900 * | 2 * 4 Bytes |
901 * +---------------------------+
902 * | Classification Queue 1 |
903 * | 2 * 4 Bytes |
904 * +---------------------------+ (n * 8 + 0x10)
905 * | ... |
906 * | 2 * 4 Bytes |
907 * +---------------------------+ (13 * 8 + 0x10)
908 * | Classification Queue 13 |
909 * | 2 * 4 Bytes |
910 * +---------------------------+ 0x80
911 * | Interrupt Queue 0 |
912 * | 2 * 4 Bytes |
913 * +---------------------------+
914 * | Interrupt Queue 1 |
915 * | 2 * 4 Bytes |
916 * +---------------------------+
917 * | Interrupt Queue 2 |
918 * | 2 * 4 Bytes |
919 * +---------------------------+
920 * | Interrupt Queue 3 |
921 * | 2 * 4 Bytes |
922 * +---------------------------+
923 *
924 */
/* Addresses of individual queue headers inside the regions sketched in
 * the memory-layout comment above: TOE queue headers are 32 bytes
 * apart, non-TOE (default/class/interrupt) headers 8 bytes apart.
 * The (n)/(x) arguments are parenthesized so that expression arguments
 * such as "base + i" expand with the intended precedence (the original
 * "n * 32" would have multiplied only the last term).
 */
#define TOE_QUEUE_HDR_ADDR(n)	(TOE_TOE_QUE_HDR_BASE + (n) * 32)
#define TOE_Q_HDR_AREA_END	(TOE_QUEUE_HDR_ADDR(TOE_TOE_QUEUE_MAX + 1))
#define TOE_DEFAULT_Q_HDR_BASE(x) (TOE_NONTOE_QUE_HDR_BASE + 0x08 * (x))
#define TOE_CLASS_Q_HDR_BASE	(TOE_NONTOE_QUE_HDR_BASE + 0x10)
#define TOE_INTR_Q_HDR_BASE	(TOE_NONTOE_QUE_HDR_BASE + 0x80)
#define INTERRUPT_QUEUE_HDR_ADDR(n) (TOE_INTR_Q_HDR_BASE + (n) * 8)
#define NONTOE_Q_HDR_AREA_END (INTERRUPT_QUEUE_HDR_ADDR(TOE_INTR_QUEUE_MAX + 1))
932
/* NONTOE Queue Header Word 0: base address and size packed in one
 * word; NONTOE_QHDR0_BASE_MASK strips the low size bits to recover
 * the base address.
 */
union nontoe_qhdr0 {
	unsigned int bits32;	/* whole-word access */
	unsigned int base_size;	/* base address (31:4) | size code (3:0) */
};

#define NONTOE_QHDR0_BASE_MASK	(~0x0f)	/* mask off the size bits */
940
/* NONTOE Queue Header Word 1: hardware read/write pointers of the
 * ring described by word 0.
 */
union nontoe_qhdr1 {
	unsigned int bits32;	/* whole-word access */
	struct bit_nonqhdr1 {
		/* bit 15:0 */
		unsigned int rptr:16;
		/* bit 31:16 */
		unsigned int wptr:16;
	} bits;
};
951
/* Non-TOE Queue Header: the two-word header as laid out in the
 * non-TOE queue header region (see the memory-layout comment above
 * TOE_QUEUE_HDR_ADDR).
 */
struct nontoe_qhdr {
	union nontoe_qhdr0 word0;	/* base address + size */
	union nontoe_qhdr1 word1;	/* read/write pointers */
};
957
958#endif /* _GEMINI_ETHERNET_H */