author    Linus Torvalds <torvalds@linux-foundation.org>    2012-12-14 17:54:26 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-12-14 17:54:26 -0500
commit    c2714334b944abbeaaadda8cddde619eff0292a1 (patch)
tree      b45be97a313f58aa62933040230d51aa3a8592b4 /drivers/net
parent    0beb58783f2168354e2b5297af45fc7db70adf12 (diff)
parent    5e5d8999a316d596f2012fe1cf4c59e0de693dab (diff)
Merge tag 'mvebu' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc

Pull ARM SoC updates for Marvell mvebu/kirkwood from Olof Johansson:
 "This is a branch with updates for Marvell's mvebu/kirkwood platforms.
  They came in late-ish, and were heavily interdependent such that it
  didn't make sense to split them up across the cross-platform topic
  branches. So here they are (for the second release in a row) in a
  branch on their own."

* tag 'mvebu' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (88 commits)
  arm: l2x0: add aurora related properties to OF binding
  arm: mvebu: add Aurora L2 Cache Controller to the DT
  arm: mvebu: add L2 cache support
  dma: mv_xor: fix error handling path
  dma: mv_xor: fix error checking of irq_of_parse_and_map()
  dma: mv_xor: use request_irq() instead of devm_request_irq()
  dma: mv_xor: clear the window override control registers
  arm: mvebu: fix address decoding armada_cfg_base() function
  ARM: mvebu: update defconfig with I2C and RTC support
  ARM: mvebu: Add SATA support for OpenBlocks AX3-4
  ARM: mvebu: Add support for the RTC in OpenBlocks AX3-4
  ARM: mvebu: Add support for I2C on OpenBlocks AX3-4
  ARM: mvebu: Add support for I2C controllers in Armada 370/XP
  arm: mvebu: Add hardware I/O Coherency support
  arm: plat-orion: Add coherency attribute when setup mbus target
  arm: dma mapping: Export a dma ops function arm_dma_set_mask
  arm: mvebu: Add SMP support for Armada XP
  arm: mm: Add support for PJ4B cpu and init routines
  arm: mvebu: Add IPI support via doorbells
  arm: mvebu: Add initial support for power managmement service unit
  ...
Diffstat (limited to 'drivers/net')
-rw-r--r--   drivers/net/ethernet/marvell/Kconfig   |   24
-rw-r--r--   drivers/net/ethernet/marvell/Makefile  |    2
-rw-r--r--   drivers/net/ethernet/marvell/mvmdio.c  |  228
-rw-r--r--   drivers/net/ethernet/marvell/mvneta.c  | 2848
4 files changed, 3102 insertions, 0 deletions
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 0029934748bc..edfba9370922 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -31,6 +31,30 @@ config MV643XX_ETH
 	  Some boards that use the Discovery chipset are the Momenco
 	  Ocelot C and Jaguar ATX and Pegasos II.
 
+config MVMDIO
+	tristate "Marvell MDIO interface support"
+	---help---
+	  This driver supports the MDIO interface found in the network
+	  interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
+	  Dove, Armada 370 and Armada XP).
+
+	  For now, this driver is only needed for the MVNETA driver
+	  (used on Armada 370 and XP), but it could be used in the
+	  future by the MV643XX_ETH driver.
+
+config MVNETA
+	tristate "Marvell Armada 370/XP network interface support"
+	depends on MACH_ARMADA_370_XP
+	select PHYLIB
+	select MVMDIO
+	---help---
+	  This driver supports the network interface units in the
+	  Marvell ARMADA XP and ARMADA 370 SoC family.
+
+	  Note that this driver is distinct from the mv643xx_eth
+	  driver, which should be used for the older Marvell SoCs
+	  (Dove, Orion, Discovery, Kirkwood).
+
 config PXA168_ETH
 	tristate "Marvell pxa168 ethernet support"
 	depends on CPU_PXA168
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 57e3234a37ba..7f63b4aac434 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -3,6 +3,8 @@
 #
 
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_MVMDIO) += mvmdio.o
+obj-$(CONFIG_MVNETA) += mvneta.o
 obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
 obj-$(CONFIG_SKGE) += skge.o
 obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
new file mode 100644
index 000000000000..6d6002bab060
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -0,0 +1,228 @@
1/*
2 * Driver for the MDIO interface of Marvell network interfaces.
3 *
4 * Since the MDIO interface of Marvell network interfaces is shared
 5 * between all network interfaces, a single driver allows concurrent
 6 * accesses to be handled properly (you may have four Ethernet
7 * ports, but they in fact share the same SMI interface to access the
8 * MDIO bus). Moreover, this MDIO interface code is similar between
9 * the mv643xx_eth driver and the mvneta driver. For now, it is only
10 * used by the mvneta driver, but it could later be used by the
11 * mv643xx_eth driver as well.
12 *
13 * Copyright (C) 2012 Marvell
14 *
15 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
16 *
17 * This file is licensed under the terms of the GNU General Public
18 * License version 2. This program is licensed "as is" without any
19 * warranty of any kind, whether express or implied.
20 */
21
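The header comment above explains why the SMI/MDIO block gets its own driver: every port ends up talking to its PHY through this one shared bus. A minimal sketch of what a consumer could look like, assuming a "phy" phandle in the MAC's device-tree node and a link handler called my_link_event (both hypothetical names, not taken from this patch):

#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

/* Hypothetical consumer (not part of this patch): attach to a PHY that
 * lives on the shared orion-mdio bus, described by a "phy" phandle in
 * the MAC's device-tree node. */
static int my_mac_connect_phy(struct net_device *ndev,
                              struct device_node *mac_np,
                              void (*my_link_event)(struct net_device *))
{
        struct device_node *phy_np;
        struct phy_device *phydev;

        phy_np = of_parse_phandle(mac_np, "phy", 0);
        if (!phy_np)
                return -ENODEV;

        /* of_phy_connect() resolves the phandle to the phy_device that
         * phylib created when of_mdiobus_register() scanned this bus. */
        phydev = of_phy_connect(ndev, phy_np, my_link_event, 0,
                                PHY_INTERFACE_MODE_RGMII);
        return phydev ? 0 : -ENODEV;
}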
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/mutex.h>
26#include <linux/phy.h>
27#include <linux/of_address.h>
28#include <linux/of_mdio.h>
29#include <linux/platform_device.h>
30#include <linux/delay.h>
31
32#define MVMDIO_SMI_DATA_SHIFT 0
33#define MVMDIO_SMI_PHY_ADDR_SHIFT 16
34#define MVMDIO_SMI_PHY_REG_SHIFT 21
35#define MVMDIO_SMI_READ_OPERATION BIT(26)
36#define MVMDIO_SMI_WRITE_OPERATION 0
37#define MVMDIO_SMI_READ_VALID BIT(27)
38#define MVMDIO_SMI_BUSY BIT(28)
39
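To make the bit layout above concrete, here is a small helper (illustrative only, not part of the driver) that composes the SMI command word the same way orion_mdio_read() below does; completion is then signalled by MVMDIO_SMI_READ_VALID and the result sits in the low 16 data bits:

/* Illustrative only: compose the SMI word for a read of register "reg"
 * on PHY address "phy" (e.g. phy = 0, reg = 1 for MII_BMSR). */
static u32 orion_mdio_example_read_cmd(int phy, int reg)
{
        return (phy << MVMDIO_SMI_PHY_ADDR_SHIFT) |     /* bits 20:16 */
               (reg << MVMDIO_SMI_PHY_REG_SHIFT)  |     /* bits 25:21 */
               MVMDIO_SMI_READ_OPERATION;               /* bit 26     */
}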
40struct orion_mdio_dev {
41 struct mutex lock;
42 void __iomem *smireg;
43};
44
45/* Wait for the SMI unit to be ready for another operation
46 */
47static int orion_mdio_wait_ready(struct mii_bus *bus)
48{
49 struct orion_mdio_dev *dev = bus->priv;
50 int count;
51 u32 val;
52
53 count = 0;
54 while (1) {
55 val = readl(dev->smireg);
56 if (!(val & MVMDIO_SMI_BUSY))
57 break;
58
59 if (count > 100) {
60 dev_err(bus->parent, "Timeout: SMI busy for too long\n");
61 return -ETIMEDOUT;
62 }
63
64 udelay(10);
65 count++;
66 }
67
68 return 0;
69}
70
71static int orion_mdio_read(struct mii_bus *bus, int mii_id,
72 int regnum)
73{
74 struct orion_mdio_dev *dev = bus->priv;
75 int count;
76 u32 val;
77 int ret;
78
79 mutex_lock(&dev->lock);
80
81 ret = orion_mdio_wait_ready(bus);
82 if (ret < 0) {
83 mutex_unlock(&dev->lock);
84 return ret;
85 }
86
87 writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
88 (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
89 MVMDIO_SMI_READ_OPERATION),
90 dev->smireg);
91
92 /* Wait for the value to become available */
93 count = 0;
94 while (1) {
95 val = readl(dev->smireg);
96 if (val & MVMDIO_SMI_READ_VALID)
97 break;
98
99 if (count > 100) {
100 dev_err(bus->parent, "Timeout when reading PHY\n");
101 mutex_unlock(&dev->lock);
102 return -ETIMEDOUT;
103 }
104
105 udelay(10);
106 count++;
107 }
108
109 mutex_unlock(&dev->lock);
110
111 return val & 0xFFFF;
112}
113
114static int orion_mdio_write(struct mii_bus *bus, int mii_id,
115 int regnum, u16 value)
116{
117 struct orion_mdio_dev *dev = bus->priv;
118 int ret;
119
120 mutex_lock(&dev->lock);
121
122 ret = orion_mdio_wait_ready(bus);
123 if (ret < 0) {
124 mutex_unlock(&dev->lock);
125 return ret;
126 }
127
128 writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
129 (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
130 MVMDIO_SMI_WRITE_OPERATION |
131 (value << MVMDIO_SMI_DATA_SHIFT)),
132 dev->smireg);
133
134 mutex_unlock(&dev->lock);
135
136 return 0;
137}
138
139static int orion_mdio_reset(struct mii_bus *bus)
140{
141 return 0;
142}
143
144static int __devinit orion_mdio_probe(struct platform_device *pdev)
145{
146 struct device_node *np = pdev->dev.of_node;
147 struct mii_bus *bus;
148 struct orion_mdio_dev *dev;
149 int i, ret;
150
151 bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
152 if (!bus) {
153 dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
154 return -ENOMEM;
155 }
156
157 bus->name = "orion_mdio_bus";
158 bus->read = orion_mdio_read;
159 bus->write = orion_mdio_write;
160 bus->reset = orion_mdio_reset;
161 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
162 dev_name(&pdev->dev));
163 bus->parent = &pdev->dev;
164
165 bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
166 if (!bus->irq) {
167 dev_err(&pdev->dev, "Cannot allocate PHY IRQ array\n");
168 mdiobus_free(bus);
169 return -ENOMEM;
170 }
171
172 for (i = 0; i < PHY_MAX_ADDR; i++)
173 bus->irq[i] = PHY_POLL;
174
175 dev = bus->priv;
176 dev->smireg = of_iomap(pdev->dev.of_node, 0);
177 if (!dev->smireg) {
178 dev_err(&pdev->dev, "No SMI register address given in DT\n");
179 kfree(bus->irq);
180 mdiobus_free(bus);
181 return -ENODEV;
182 }
183
184 mutex_init(&dev->lock);
185
186 ret = of_mdiobus_register(bus, np);
187 if (ret < 0) {
188 dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
189 iounmap(dev->smireg);
190 kfree(bus->irq);
191 mdiobus_free(bus);
192 return ret;
193 }
194
195 platform_set_drvdata(pdev, bus);
196
197 return 0;
198}
199
200static int __devexit orion_mdio_remove(struct platform_device *pdev)
201{
202 struct mii_bus *bus = platform_get_drvdata(pdev);
203 mdiobus_unregister(bus);
204 kfree(bus->irq);
205 mdiobus_free(bus);
206 return 0;
207}
208
209static const struct of_device_id orion_mdio_match[] = {
210 { .compatible = "marvell,orion-mdio" },
211 { }
212};
213MODULE_DEVICE_TABLE(of, orion_mdio_match);
214
215static struct platform_driver orion_mdio_driver = {
216 .probe = orion_mdio_probe,
217 .remove = __devexit_p(orion_mdio_remove),
218 .driver = {
219 .name = "orion-mdio",
220 .of_match_table = orion_mdio_match,
221 },
222};
223
224module_platform_driver(orion_mdio_driver);
225
226MODULE_DESCRIPTION("Marvell MDIO interface driver");
227MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
228MODULE_LICENSE("GPL");
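Once registered, the bus above is driven through the standard phylib accessors. The sketch below (not part of this patch) shows a caller going through mdiobus_read(), which dispatches to the bus->read hook, i.e. orion_mdio_read() above, and may propagate a negative errno such as -ETIMEDOUT:

#include <linux/kernel.h>
#include <linux/phy.h>

/* Sketch (not from this patch): read the basic status register of a PHY
 * sitting on the registered orion_mdio bus. */
static int example_read_bmsr(struct mii_bus *bus, int phy_addr)
{
        int bmsr = mdiobus_read(bus, phy_addr, MII_BMSR);

        if (bmsr < 0)
                return bmsr;
        pr_info("PHY %d BMSR: 0x%04x\n", phy_addr, bmsr);
        return 0;
}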
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
new file mode 100644
index 000000000000..3f8086b9f5e5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -0,0 +1,2848 @@
1/*
2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Rami Rosen <rosenr@marvell.com>
7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#include <linux/kernel.h>
15#include <linux/version.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/platform_device.h>
19#include <linux/skbuff.h>
20#include <linux/inetdevice.h>
21#include <linux/mbus.h>
22#include <linux/module.h>
23#include <linux/interrupt.h>
24#include <net/ip.h>
25#include <net/ipv6.h>
26#include <linux/of.h>
27#include <linux/of_irq.h>
28#include <linux/of_mdio.h>
29#include <linux/of_net.h>
30#include <linux/of_address.h>
31#include <linux/phy.h>
32#include <linux/clk.h>
33
34/* Registers */
35#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
36#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
37#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
38#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
39#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
40#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
41#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
42#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
43#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
44#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
45#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
46#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
47#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
48#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
49#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
50#define MVNETA_PORT_RX_RESET 0x1cc0
51#define MVNETA_PORT_RX_DMA_RESET BIT(0)
52#define MVNETA_PHY_ADDR 0x2000
53#define MVNETA_PHY_ADDR_MASK 0x1f
54#define MVNETA_MBUS_RETRY 0x2010
55#define MVNETA_UNIT_INTR_CAUSE 0x2080
56#define MVNETA_UNIT_CONTROL 0x20B0
57#define MVNETA_PHY_POLLING_ENABLE BIT(1)
58#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
59#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
60#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
61#define MVNETA_BASE_ADDR_ENABLE 0x2290
62#define MVNETA_PORT_CONFIG 0x2400
63#define MVNETA_UNI_PROMISC_MODE BIT(0)
64#define MVNETA_DEF_RXQ(q) ((q) << 1)
65#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
66#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
67#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
68#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
69#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
70#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
71#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
72 MVNETA_DEF_RXQ_ARP(q) | \
73 MVNETA_DEF_RXQ_TCP(q) | \
74 MVNETA_DEF_RXQ_UDP(q) | \
75 MVNETA_DEF_RXQ_BPDU(q) | \
76 MVNETA_TX_UNSET_ERR_SUM | \
77 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
78#define MVNETA_PORT_CONFIG_EXTEND 0x2404
79#define MVNETA_MAC_ADDR_LOW 0x2414
80#define MVNETA_MAC_ADDR_HIGH 0x2418
81#define MVNETA_SDMA_CONFIG 0x241c
82#define MVNETA_SDMA_BRST_SIZE_16 4
83#define MVNETA_NO_DESC_SWAP 0x0
84#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
85#define MVNETA_RX_NO_DATA_SWAP BIT(4)
86#define MVNETA_TX_NO_DATA_SWAP BIT(5)
87#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
88#define MVNETA_PORT_STATUS 0x2444
89#define MVNETA_TX_IN_PRGRS BIT(1)
90#define MVNETA_TX_FIFO_EMPTY BIT(8)
91#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
92#define MVNETA_TYPE_PRIO 0x24bc
93#define MVNETA_FORCE_UNI BIT(21)
94#define MVNETA_TXQ_CMD_1 0x24e4
95#define MVNETA_TXQ_CMD 0x2448
96#define MVNETA_TXQ_DISABLE_SHIFT 8
97#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
98#define MVNETA_ACC_MODE 0x2500
99#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
100#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
101#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
102#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
103#define MVNETA_INTR_NEW_CAUSE 0x25a0
104#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
105#define MVNETA_INTR_NEW_MASK 0x25a4
106#define MVNETA_INTR_OLD_CAUSE 0x25a8
107#define MVNETA_INTR_OLD_MASK 0x25ac
108#define MVNETA_INTR_MISC_CAUSE 0x25b0
109#define MVNETA_INTR_MISC_MASK 0x25b4
110#define MVNETA_INTR_ENABLE 0x25b8
111#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
112#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
113#define MVNETA_RXQ_CMD 0x2680
114#define MVNETA_RXQ_DISABLE_SHIFT 8
115#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
116#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
117#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
118#define MVNETA_GMAC_CTRL_0 0x2c00
119#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
120#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
121#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
122#define MVNETA_GMAC_CTRL_2 0x2c08
123#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
124#define MVNETA_GMAC2_PORT_RGMII BIT(4)
125#define MVNETA_GMAC2_PORT_RESET BIT(6)
126#define MVNETA_GMAC_STATUS 0x2c10
127#define MVNETA_GMAC_LINK_UP BIT(0)
128#define MVNETA_GMAC_SPEED_1000 BIT(1)
129#define MVNETA_GMAC_SPEED_100 BIT(2)
130#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
131#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
132#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
133#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
134#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
135#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
136#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
137#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
138#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
139#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
140#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
141#define MVNETA_MIB_COUNTERS_BASE 0x3080
142#define MVNETA_MIB_LATE_COLLISION 0x7c
143#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
144#define MVNETA_DA_FILT_OTH_MCAST 0x3500
145#define MVNETA_DA_FILT_UCAST_BASE 0x3600
146#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
147#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
148#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
149#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
150#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
151#define MVNETA_TXQ_DEC_SENT_SHIFT 16
152#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
153#define MVNETA_TXQ_SENT_DESC_SHIFT 16
154#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
155#define MVNETA_PORT_TX_RESET 0x3cf0
156#define MVNETA_PORT_TX_DMA_RESET BIT(0)
157#define MVNETA_TX_MTU 0x3e0c
158#define MVNETA_TX_TOKEN_SIZE 0x3e14
159#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
160#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
161#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
162
163#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
164
165/* Descriptor ring Macros */
166#define MVNETA_QUEUE_NEXT_DESC(q, index) \
167 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
168
169/* Various constants */
170
171/* Coalescing */
172#define MVNETA_TXDONE_COAL_PKTS 16
173#define MVNETA_RX_COAL_PKTS 32
174#define MVNETA_RX_COAL_USEC 100
175
176/* Timer */
177#define MVNETA_TX_DONE_TIMER_PERIOD 10
178
179/* Napi polling weight */
180#define MVNETA_RX_POLL_WEIGHT 64
181
 182/* The two-byte Marvell header. It either contains a special value used
 183 * by Marvell switches when a specific hardware mode is enabled (not
 184 * supported by this driver) or is filled automatically with zeroes on
 185 * the RX side. Since those two bytes sit in front of the Ethernet
 186 * header, they automatically leave the IP header aligned on a 4-byte
 187 * boundary: the hardware skips those two bytes on its
188 * own.
189 */
190#define MVNETA_MH_SIZE 2
191
192#define MVNETA_VLAN_TAG_LEN 4
193
194#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
195#define MVNETA_TX_CSUM_MAX_SIZE 9800
196#define MVNETA_ACC_MODE_EXT 1
197
198/* Timeout constants */
199#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
200#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
201#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
202
203#define MVNETA_TX_MTU_MAX 0x3ffff
204
205/* Max number of Rx descriptors */
206#define MVNETA_MAX_RXD 128
207
208/* Max number of Tx descriptors */
209#define MVNETA_MAX_TXD 532
210
211/* descriptor aligned size */
212#define MVNETA_DESC_ALIGNED_SIZE 32
213
214#define MVNETA_RX_PKT_SIZE(mtu) \
215 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
216 ETH_HLEN + ETH_FCS_LEN, \
217 MVNETA_CPU_D_CACHE_LINE_SIZE)
218
219#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
220
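As a worked example of the two sizing macros above (editorial, not part of the patch): for the default MTU of 1500 the receive packet size rounds up to 1536 bytes, and the DMA buffer adds NET_SKB_PAD of headroom on top.

/* Worked example (editorial, not part of the patch), for mtu = 1500:
 *
 *   MVNETA_RX_PKT_SIZE(1500)
 *     = ALIGN(1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) + 4 (FCS), 32)
 *     = ALIGN(1524, 32) = 1536
 *
 *   MVNETA_RX_BUF_SIZE(1536) = 1536 + NET_SKB_PAD
 *     (NET_SKB_PAD is architecture dependent, commonly 32 or 64)
 *
 * The 2-byte Marvell header plus the 14-byte Ethernet header put the IP
 * header at offset 16, i.e. on a 4-byte boundary, as described above. */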
221struct mvneta_stats {
222 struct u64_stats_sync syncp;
223 u64 packets;
224 u64 bytes;
225};
226
227struct mvneta_port {
228 int pkt_size;
229 void __iomem *base;
230 struct mvneta_rx_queue *rxqs;
231 struct mvneta_tx_queue *txqs;
232 struct timer_list tx_done_timer;
233 struct net_device *dev;
234
235 u32 cause_rx_tx;
236 struct napi_struct napi;
237
238 /* Flags */
239 unsigned long flags;
240#define MVNETA_F_TX_DONE_TIMER_BIT 0
241
242 /* Napi weight */
243 int weight;
244
245 /* Core clock */
246 struct clk *clk;
247 u8 mcast_count[256];
248 u16 tx_ring_size;
249 u16 rx_ring_size;
250 struct mvneta_stats tx_stats;
251 struct mvneta_stats rx_stats;
252
253 struct mii_bus *mii_bus;
254 struct phy_device *phy_dev;
255 phy_interface_t phy_interface;
256 struct device_node *phy_node;
257 unsigned int link;
258 unsigned int duplex;
259 unsigned int speed;
260};
261
262/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
263 * layout of the transmit and reception DMA descriptors, and their
264 * layout is therefore defined by the hardware design
265 */
266struct mvneta_tx_desc {
267 u32 command; /* Options used by HW for packet transmitting.*/
268#define MVNETA_TX_L3_OFF_SHIFT 0
269#define MVNETA_TX_IP_HLEN_SHIFT 8
270#define MVNETA_TX_L4_UDP BIT(16)
271#define MVNETA_TX_L3_IP6 BIT(17)
272#define MVNETA_TXD_IP_CSUM BIT(18)
273#define MVNETA_TXD_Z_PAD BIT(19)
274#define MVNETA_TXD_L_DESC BIT(20)
275#define MVNETA_TXD_F_DESC BIT(21)
276#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
277 MVNETA_TXD_L_DESC | \
278 MVNETA_TXD_F_DESC)
279#define MVNETA_TX_L4_CSUM_FULL BIT(30)
280#define MVNETA_TX_L4_CSUM_NOT BIT(31)
281
282 u16 reserverd1; /* csum_l4 (for future use) */
283 u16 data_size; /* Data size of transmitted packet in bytes */
284 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
285 u32 reserved2; /* hw_cmd - (for future use, PMT) */
286 u32 reserved3[4]; /* Reserved - (for future use) */
287};
288
289struct mvneta_rx_desc {
290 u32 status; /* Info about received packet */
291#define MVNETA_RXD_ERR_CRC 0x0
292#define MVNETA_RXD_ERR_SUMMARY BIT(16)
293#define MVNETA_RXD_ERR_OVERRUN BIT(17)
294#define MVNETA_RXD_ERR_LEN BIT(18)
295#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
296#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
297#define MVNETA_RXD_L3_IP4 BIT(25)
298#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
299#define MVNETA_RXD_L4_CSUM_OK BIT(30)
300
301 u16 reserved1; /* pnc_info - (for future use, PnC) */
302 u16 data_size; /* Size of received packet in bytes */
303 u32 buf_phys_addr; /* Physical address of the buffer */
304 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
305 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
306 u16 reserved3; /* prefetch_cmd, for future use */
307 u16 reserved4; /* csum_l4 - (for future use, PnC) */
308 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
309 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
310};
311
312struct mvneta_tx_queue {
313 /* Number of this TX queue, in the range 0-7 */
314 u8 id;
315
316 /* Number of TX DMA descriptors in the descriptor ring */
317 int size;
318
319 /* Number of currently used TX DMA descriptor in the
320 * descriptor ring
321 */
322 int count;
323
324 /* Array of transmitted skb */
325 struct sk_buff **tx_skb;
326
327 /* Index of last TX DMA descriptor that was inserted */
328 int txq_put_index;
329
330 /* Index of the TX DMA descriptor to be cleaned up */
331 int txq_get_index;
332
333 u32 done_pkts_coal;
334
335 /* Virtual address of the TX DMA descriptors array */
336 struct mvneta_tx_desc *descs;
337
338 /* DMA address of the TX DMA descriptors array */
339 dma_addr_t descs_phys;
340
341 /* Index of the last TX DMA descriptor */
342 int last_desc;
343
344 /* Index of the next TX DMA descriptor to process */
345 int next_desc_to_proc;
346};
347
348struct mvneta_rx_queue {
349 /* rx queue number, in the range 0-7 */
350 u8 id;
351
352 /* num of rx descriptors in the rx descriptor ring */
353 int size;
354
355 /* counter of times when mvneta_refill() failed */
356 int missed;
357
358 u32 pkts_coal;
359 u32 time_coal;
360
361 /* Virtual address of the RX DMA descriptors array */
362 struct mvneta_rx_desc *descs;
363
364 /* DMA address of the RX DMA descriptors array */
365 dma_addr_t descs_phys;
366
367 /* Index of the last RX DMA descriptor */
368 int last_desc;
369
370 /* Index of the next RX DMA descriptor to process */
371 int next_desc_to_proc;
372};
373
374static int rxq_number = 8;
375static int txq_number = 8;
376
377static int rxq_def;
378static int txq_def;
379
380#define MVNETA_DRIVER_NAME "mvneta"
381#define MVNETA_DRIVER_VERSION "1.0"
382
383/* Utility/helper methods */
384
385/* Write helper method */
386static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
387{
388 writel(data, pp->base + offset);
389}
390
391/* Read helper method */
392static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
393{
394 return readl(pp->base + offset);
395}
396
397/* Increment txq get counter */
398static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
399{
400 txq->txq_get_index++;
401 if (txq->txq_get_index == txq->size)
402 txq->txq_get_index = 0;
403}
404
405/* Increment txq put counter */
406static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
407{
408 txq->txq_put_index++;
409 if (txq->txq_put_index == txq->size)
410 txq->txq_put_index = 0;
411}
412
413
414/* Clear all MIB counters */
415static void mvneta_mib_counters_clear(struct mvneta_port *pp)
416{
417 int i;
418 u32 dummy;
419
420 /* Perform dummy reads from MIB counters */
421 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
422 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
423}
424
425/* Get System Network Statistics */
426struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
427 struct rtnl_link_stats64 *stats)
428{
429 struct mvneta_port *pp = netdev_priv(dev);
430 unsigned int start;
431
432 memset(stats, 0, sizeof(struct rtnl_link_stats64));
433
434 do {
435 start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
436 stats->rx_packets = pp->rx_stats.packets;
437 stats->rx_bytes = pp->rx_stats.bytes;
438 } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
439
440
441 do {
442 start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
443 stats->tx_packets = pp->tx_stats.packets;
444 stats->tx_bytes = pp->tx_stats.bytes;
445 } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
446
447 stats->rx_errors = dev->stats.rx_errors;
448 stats->rx_dropped = dev->stats.rx_dropped;
449
450 stats->tx_dropped = dev->stats.tx_dropped;
451
452 return stats;
453}
454
455/* Rx descriptors helper methods */
456
457/* Checks whether the given RX descriptor is both the first and the
458 * last descriptor for the RX packet. Each RX packet is currently
459 * received through a single RX descriptor, so not having each RX
460 * descriptor with its first and last bits set is an error
461 */
462static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
463{
464 return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
465 MVNETA_RXD_FIRST_LAST_DESC;
466}
467
468/* Add number of descriptors ready to receive new packets */
469static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
470 struct mvneta_rx_queue *rxq,
471 int ndescs)
472{
473 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
474 * be added at once
475 */
476 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
477 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
478 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
479 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
480 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
481 }
482
483 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
484 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
485}
486
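For instance (editorial): refilling 300 descriptors at once is split by the loop above into a status-update write of 255 followed by a final write of 45.

/* Worked example (editorial): ndescs = 300 reaches the hardware as two
 * writes to MVNETA_RXQ_STATUS_UPDATE_REG:
 *   255 << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT   (loop iteration)
 *    45 << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT   (final write)
 */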
487/* Get number of RX descriptors occupied by received packets */
488static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
489 struct mvneta_rx_queue *rxq)
490{
491 u32 val;
492
493 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
494 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
495}
496
497/* Update num of rx desc called upon return from rx path or
498 * from mvneta_rxq_drop_pkts().
499 */
500static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
501 struct mvneta_rx_queue *rxq,
502 int rx_done, int rx_filled)
503{
504 u32 val;
505
506 if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
507 val = rx_done |
508 (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
509 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
510 return;
511 }
512
513 /* Only 255 descriptors can be added at once */
514 while ((rx_done > 0) || (rx_filled > 0)) {
515 if (rx_done <= 0xff) {
516 val = rx_done;
517 rx_done = 0;
518 } else {
519 val = 0xff;
520 rx_done -= 0xff;
521 }
522 if (rx_filled <= 0xff) {
523 val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
524 rx_filled = 0;
525 } else {
526 val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
527 rx_filled -= 0xff;
528 }
529 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
530 }
531}
532
533/* Get pointer to next RX descriptor to be processed by SW */
534static struct mvneta_rx_desc *
535mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
536{
537 int rx_desc = rxq->next_desc_to_proc;
538
539 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
540 return rxq->descs + rx_desc;
541}
542
543/* Change maximum receive size of the port. */
544static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
545{
546 u32 val;
547
548 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
549 val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
550 val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
551 MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
552 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
553}
554
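The max-RX-size field written above appears to be expressed in 2-byte units once the Marvell header is subtracted; a quick worked example of the arithmetic (editorial):

/* Worked example (editorial): for max_rx_size = 1536,
 *   (1536 - MVNETA_MH_SIZE) / 2 = (1536 - 2) / 2 = 767
 * is placed into GMAC_CTRL_0 bits [14:2] (MVNETA_GMAC_MAX_RX_SIZE_MASK),
 * which suggests the field counts 16-bit halfwords excluding the 2-byte
 * Marvell header. */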
555
556/* Set rx queue offset */
557static void mvneta_rxq_offset_set(struct mvneta_port *pp,
558 struct mvneta_rx_queue *rxq,
559 int offset)
560{
561 u32 val;
562
563 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
564 val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
565
 566 /* Offset is programmed in units of 8 bytes (hence offset >> 3 below) */
567 val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
568 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
569}
570
571
572/* Tx descriptors helper methods */
573
574/* Update HW with number of TX descriptors to be sent */
575static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
576 struct mvneta_tx_queue *txq,
577 int pend_desc)
578{
579 u32 val;
580
 581 /* Only 255 descriptors can be added at once; assume the caller
 582 * processes TX descriptors in quanta of less than 256
583 */
584 val = pend_desc;
585 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
586}
587
588/* Get pointer to next TX descriptor to be processed (send) by HW */
589static struct mvneta_tx_desc *
590mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
591{
592 int tx_desc = txq->next_desc_to_proc;
593
594 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
595 return txq->descs + tx_desc;
596}
597
598/* Release the last allocated TX descriptor. Useful to handle DMA
599 * mapping failures in the TX path.
600 */
601static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
602{
603 if (txq->next_desc_to_proc == 0)
604 txq->next_desc_to_proc = txq->last_desc - 1;
605 else
606 txq->next_desc_to_proc--;
607}
608
609/* Set rxq buf size */
610static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
611 struct mvneta_rx_queue *rxq,
612 int buf_size)
613{
614 u32 val;
615
616 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
617
618 val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
619 val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
620
621 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
622}
623
624/* Disable buffer management (BM) */
625static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
626 struct mvneta_rx_queue *rxq)
627{
628 u32 val;
629
630 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
631 val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
632 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
633}
634
635
636
637/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
638static void __devinit mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
639{
640 u32 val;
641
642 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
643
644 if (enable)
645 val |= MVNETA_GMAC2_PORT_RGMII;
646 else
647 val &= ~MVNETA_GMAC2_PORT_RGMII;
648
649 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
650}
651
652/* Config SGMII port */
653static void __devinit mvneta_port_sgmii_config(struct mvneta_port *pp)
654{
655 u32 val;
656
657 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
658 val |= MVNETA_GMAC2_PSC_ENABLE;
659 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
660}
661
662/* Start the Ethernet port RX and TX activity */
663static void mvneta_port_up(struct mvneta_port *pp)
664{
665 int queue;
666 u32 q_map;
667
668 /* Enable all initialized TXs. */
669 mvneta_mib_counters_clear(pp);
670 q_map = 0;
671 for (queue = 0; queue < txq_number; queue++) {
672 struct mvneta_tx_queue *txq = &pp->txqs[queue];
673 if (txq->descs != NULL)
674 q_map |= (1 << queue);
675 }
676 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
677
678 /* Enable all initialized RXQs. */
679 q_map = 0;
680 for (queue = 0; queue < rxq_number; queue++) {
681 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
682 if (rxq->descs != NULL)
683 q_map |= (1 << queue);
684 }
685
686 mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
687}
688
689/* Stop the Ethernet port activity */
690static void mvneta_port_down(struct mvneta_port *pp)
691{
692 u32 val;
693 int count;
694
695 /* Stop Rx port activity. Check port Rx activity. */
696 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
697
698 /* Issue stop command for active channels only */
699 if (val != 0)
700 mvreg_write(pp, MVNETA_RXQ_CMD,
701 val << MVNETA_RXQ_DISABLE_SHIFT);
702
703 /* Wait for all Rx activity to terminate. */
704 count = 0;
705 do {
706 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
707 netdev_warn(pp->dev,
708 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
709 val);
710 break;
711 }
712 mdelay(1);
713
714 val = mvreg_read(pp, MVNETA_RXQ_CMD);
715 } while (val & 0xff);
716
717 /* Stop Tx port activity. Check port Tx activity. Issue stop
718 * command for active channels only
719 */
720 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
721
722 if (val != 0)
723 mvreg_write(pp, MVNETA_TXQ_CMD,
724 (val << MVNETA_TXQ_DISABLE_SHIFT));
725
726 /* Wait for all Tx activity to terminate. */
727 count = 0;
728 do {
729 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
730 netdev_warn(pp->dev,
731 "TIMEOUT for TX stopped status=0x%08x\n",
732 val);
733 break;
734 }
735 mdelay(1);
736
737 /* Check TX Command reg that all Txqs are stopped */
738 val = mvreg_read(pp, MVNETA_TXQ_CMD);
739
740 } while (val & 0xff);
741
742 /* Double check to verify that TX FIFO is empty */
743 count = 0;
744 do {
745 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
746 netdev_warn(pp->dev,
747 "TX FIFO empty timeout status=0x08%x\n",
748 val);
749 break;
750 }
751 mdelay(1);
752
753 val = mvreg_read(pp, MVNETA_PORT_STATUS);
754 } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
755 (val & MVNETA_TX_IN_PRGRS));
756
757 udelay(200);
758}
759
760/* Enable the port by setting the port enable bit of the MAC control register */
761static void mvneta_port_enable(struct mvneta_port *pp)
762{
763 u32 val;
764
765 /* Enable port */
766 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
767 val |= MVNETA_GMAC0_PORT_ENABLE;
768 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
769}
770
 771/* Disable the port and wait for about 200 usec before returning */
772static void mvneta_port_disable(struct mvneta_port *pp)
773{
774 u32 val;
775
776 /* Reset the Enable bit in the Serial Control Register */
777 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
778 val &= ~MVNETA_GMAC0_PORT_ENABLE;
779 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
780
781 udelay(200);
782}
783
784/* Multicast tables methods */
785
786/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
787static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
788{
789 int offset;
790 u32 val;
791
792 if (queue == -1) {
793 val = 0;
794 } else {
795 val = 0x1 | (queue << 1);
796 val |= (val << 24) | (val << 16) | (val << 8);
797 }
798
799 for (offset = 0; offset <= 0xc; offset += 4)
800 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
801}
802
803/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
804static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
805{
806 int offset;
807 u32 val;
808
809 if (queue == -1) {
810 val = 0;
811 } else {
812 val = 0x1 | (queue << 1);
813 val |= (val << 24) | (val << 16) | (val << 8);
814 }
815
816 for (offset = 0; offset <= 0xfc; offset += 4)
817 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
818
819}
820
821/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
822static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
823{
824 int offset;
825 u32 val;
826
827 if (queue == -1) {
828 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
829 val = 0;
830 } else {
831 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
832 val = 0x1 | (queue << 1);
833 val |= (val << 24) | (val << 16) | (val << 8);
834 }
835
836 for (offset = 0; offset <= 0xfc; offset += 4)
837 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
838}
839
840/* This method sets defaults to the NETA port:
841 * Clears interrupt Cause and Mask registers.
842 * Clears all MAC tables.
843 * Sets defaults to all registers.
844 * Resets RX and TX descriptor rings.
845 * Resets PHY.
846 * This method can be called after mvneta_port_down() to return the port
847 * settings to defaults.
848 */
849static void mvneta_defaults_set(struct mvneta_port *pp)
850{
851 int cpu;
852 int queue;
853 u32 val;
854
855 /* Clear all Cause registers */
856 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
857 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
858 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
859
860 /* Mask all interrupts */
861 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
862 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
863 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
864 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
865
866 /* Enable MBUS Retry bit16 */
867 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
868
869 /* Set CPU queue access map - all CPUs have access to all RX
870 * queues and to all TX queues
871 */
872 for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
873 mvreg_write(pp, MVNETA_CPU_MAP(cpu),
874 (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
875 MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
876
877 /* Reset RX and TX DMAs */
878 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
879 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
880
881 /* Disable Legacy WRR, Disable EJP, Release from reset */
882 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
883 for (queue = 0; queue < txq_number; queue++) {
884 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
885 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
886 }
887
888 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
889 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
890
891 /* Set Port Acceleration Mode */
892 val = MVNETA_ACC_MODE_EXT;
893 mvreg_write(pp, MVNETA_ACC_MODE, val);
894
895 /* Update val of portCfg register accordingly with all RxQueue types */
896 val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
897 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
898
899 val = 0;
900 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
901 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
902
903 /* Build PORT_SDMA_CONFIG_REG */
904 val = 0;
905
906 /* Default burst size */
907 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
908 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
909
910 val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
911 MVNETA_NO_DESC_SWAP);
912
913 /* Assign port SDMA configuration */
914 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
915
916 mvneta_set_ucast_table(pp, -1);
917 mvneta_set_special_mcast_table(pp, -1);
918 mvneta_set_other_mcast_table(pp, -1);
919
920 /* Set port interrupt enable register - default enable all */
921 mvreg_write(pp, MVNETA_INTR_ENABLE,
922 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
923 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
924}
925
926/* Set max sizes for tx queues */
927static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
928
929{
930 u32 val, size, mtu;
931 int queue;
932
933 mtu = max_tx_size * 8;
934 if (mtu > MVNETA_TX_MTU_MAX)
935 mtu = MVNETA_TX_MTU_MAX;
936
937 /* Set MTU */
938 val = mvreg_read(pp, MVNETA_TX_MTU);
939 val &= ~MVNETA_TX_MTU_MAX;
940 val |= mtu;
941 mvreg_write(pp, MVNETA_TX_MTU, val);
942
 943 /* TX token size and all TXQs token size must be larger than MTU */
944 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
945
946 size = val & MVNETA_TX_TOKEN_SIZE_MAX;
947 if (size < mtu) {
948 size = mtu;
949 val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
950 val |= size;
951 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
952 }
953 for (queue = 0; queue < txq_number; queue++) {
954 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
955
956 size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
957 if (size < mtu) {
958 size = mtu;
959 val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
960 val |= size;
961 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
962 }
963 }
964}
965
966/* Set unicast address */
967static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
968 int queue)
969{
970 unsigned int unicast_reg;
971 unsigned int tbl_offset;
972 unsigned int reg_offset;
973
974 /* Locate the Unicast table entry */
975 last_nibble = (0xf & last_nibble);
976
977 /* offset from unicast tbl base */
978 tbl_offset = (last_nibble / 4) * 4;
979
980 /* offset within the above reg */
981 reg_offset = last_nibble % 4;
982
983 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
984
985 if (queue == -1) {
986 /* Clear accepts frame bit at specified unicast DA tbl entry */
987 unicast_reg &= ~(0xff << (8 * reg_offset));
988 } else {
989 unicast_reg &= ~(0xff << (8 * reg_offset));
990 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
991 }
992
993 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
994}
995
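To illustrate the indexing above (editorial): the 16 unicast filter entries are packed four per 32-bit register, one byte each, so the last nibble of the MAC address picks both the register and the byte lane.

/* Worked example (editorial): a MAC address ending in ...:xA gives
 * last_nibble = 0xA = 10, so
 *   tbl_offset = (10 / 4) * 4 = 8    (third 32-bit filter register)
 *   reg_offset =  10 % 4     = 2     (byte lane 2 of that register)
 * and the "accept to queue q" pattern 0x01 | (q << 1) lands in bits
 * [23:16] of the register at MVNETA_DA_FILT_UCAST_BASE + 0x8. */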
996/* Set mac address */
997static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
998 int queue)
999{
1000 unsigned int mac_h;
1001 unsigned int mac_l;
1002
1003 if (queue != -1) {
1004 mac_l = (addr[4] << 8) | (addr[5]);
1005 mac_h = (addr[0] << 24) | (addr[1] << 16) |
1006 (addr[2] << 8) | (addr[3] << 0);
1007
1008 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1009 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1010 }
1011
1012 /* Accept frames of this address */
1013 mvneta_set_ucast_addr(pp, addr[5], queue);
1014}
1015
1016/* Set the number of packets that will be received before RX interrupt
1017 * will be generated by HW.
1018 */
1019static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1020 struct mvneta_rx_queue *rxq, u32 value)
1021{
1022 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1023 value | MVNETA_RXQ_NON_OCCUPIED(0));
1024 rxq->pkts_coal = value;
1025}
1026
1027/* Set the time delay in usec before RX interrupt will be generated by
1028 * HW.
1029 */
1030static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1031 struct mvneta_rx_queue *rxq, u32 value)
1032{
1033 u32 val;
1034 unsigned long clk_rate;
1035
1036 clk_rate = clk_get_rate(pp->clk);
1037 val = (clk_rate / 1000000) * value;
1038
1039 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1040 rxq->time_coal = value;
1041}
1042
1043/* Set threshold for TX_DONE pkts coalescing */
1044static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1045 struct mvneta_tx_queue *txq, u32 value)
1046{
1047 u32 val;
1048
1049 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1050
1051 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1052 val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1053
1054 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1055
1056 txq->done_pkts_coal = value;
1057}
1058
1059/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
1060static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
1061{
1062 if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
1063 pp->tx_done_timer.expires = jiffies +
1064 msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
1065 add_timer(&pp->tx_done_timer);
1066 }
1067}
1068
1069
1070/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1071static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1072 u32 phys_addr, u32 cookie)
1073{
1074 rx_desc->buf_cookie = cookie;
1075 rx_desc->buf_phys_addr = phys_addr;
1076}
1077
1078/* Decrement sent descriptors counter */
1079static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1080 struct mvneta_tx_queue *txq,
1081 int sent_desc)
1082{
1083 u32 val;
1084
1085 /* Only 255 TX descriptors can be updated at once */
1086 while (sent_desc > 0xff) {
1087 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1088 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1089 sent_desc = sent_desc - 0xff;
1090 }
1091
1092 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1093 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1094}
1095
1096/* Get number of TX descriptors already sent by HW */
1097static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1098 struct mvneta_tx_queue *txq)
1099{
1100 u32 val;
1101 int sent_desc;
1102
1103 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1104 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1105 MVNETA_TXQ_SENT_DESC_SHIFT;
1106
1107 return sent_desc;
1108}
1109
1110/* Get number of sent descriptors and decrement counter.
1111 * The number of sent descriptors is returned.
1112 */
1113static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1114 struct mvneta_tx_queue *txq)
1115{
1116 int sent_desc;
1117
1118 /* Get number of sent descriptors */
1119 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1120
1121 /* Decrement sent descriptors counter */
1122 if (sent_desc)
1123 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1124
1125 return sent_desc;
1126}
1127
1128/* Set TXQ descriptors fields relevant for CSUM calculation */
1129static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1130 int ip_hdr_len, int l4_proto)
1131{
1132 u32 command;
1133
1134 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1135 * G_L4_chk, L4_type; required only for checksum
1136 * calculation
1137 */
1138 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1139 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1140
1141 if (l3_proto == swab16(ETH_P_IP))
1142 command |= MVNETA_TXD_IP_CSUM;
1143 else
1144 command |= MVNETA_TX_L3_IP6;
1145
1146 if (l4_proto == IPPROTO_TCP)
1147 command |= MVNETA_TX_L4_CSUM_FULL;
1148 else if (l4_proto == IPPROTO_UDP)
1149 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1150 else
1151 command |= MVNETA_TX_L4_CSUM_NOT;
1152
1153 return command;
1154}
1155
1156
1157/* Display more error info */
1158static void mvneta_rx_error(struct mvneta_port *pp,
1159 struct mvneta_rx_desc *rx_desc)
1160{
1161 u32 status = rx_desc->status;
1162
1163 if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
1164 netdev_err(pp->dev,
1165 "bad rx status %08x (buffer oversize), size=%d\n",
1166 rx_desc->status, rx_desc->data_size);
1167 return;
1168 }
1169
1170 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1171 case MVNETA_RXD_ERR_CRC:
1172 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1173 status, rx_desc->data_size);
1174 break;
1175 case MVNETA_RXD_ERR_OVERRUN:
1176 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1177 status, rx_desc->data_size);
1178 break;
1179 case MVNETA_RXD_ERR_LEN:
1180 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1181 status, rx_desc->data_size);
1182 break;
1183 case MVNETA_RXD_ERR_RESOURCE:
1184 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1185 status, rx_desc->data_size);
1186 break;
1187 }
1188}
1189
1190/* Handle RX checksum offload */
1191static void mvneta_rx_csum(struct mvneta_port *pp,
1192 struct mvneta_rx_desc *rx_desc,
1193 struct sk_buff *skb)
1194{
1195 if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
1196 (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
1197 skb->csum = 0;
1198 skb->ip_summed = CHECKSUM_UNNECESSARY;
1199 return;
1200 }
1201
1202 skb->ip_summed = CHECKSUM_NONE;
1203}
1204
1205/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
1206static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1207 u32 cause)
1208{
1209 int queue = fls(cause) - 1;
1210
1211 return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
1212}
1213
1214/* Free tx queue skbuffs */
1215static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1216 struct mvneta_tx_queue *txq, int num)
1217{
1218 int i;
1219
1220 for (i = 0; i < num; i++) {
1221 struct mvneta_tx_desc *tx_desc = txq->descs +
1222 txq->txq_get_index;
1223 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1224
1225 mvneta_txq_inc_get(txq);
1226
1227 if (!skb)
1228 continue;
1229
1230 dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
1231 tx_desc->data_size, DMA_TO_DEVICE);
1232 dev_kfree_skb_any(skb);
1233 }
1234}
1235
1236/* Handle end of transmission */
1237static int mvneta_txq_done(struct mvneta_port *pp,
1238 struct mvneta_tx_queue *txq)
1239{
1240 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1241 int tx_done;
1242
1243 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1244 if (tx_done == 0)
1245 return tx_done;
1246 mvneta_txq_bufs_free(pp, txq, tx_done);
1247
1248 txq->count -= tx_done;
1249
1250 if (netif_tx_queue_stopped(nq)) {
1251 if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
1252 netif_tx_wake_queue(nq);
1253 }
1254
1255 return tx_done;
1256}
1257
1258/* Refill processing */
1259static int mvneta_rx_refill(struct mvneta_port *pp,
1260 struct mvneta_rx_desc *rx_desc)
1261
1262{
1263 dma_addr_t phys_addr;
1264 struct sk_buff *skb;
1265
1266 skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
1267 if (!skb)
1268 return -ENOMEM;
1269
1270 phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
1271 MVNETA_RX_BUF_SIZE(pp->pkt_size),
1272 DMA_FROM_DEVICE);
1273 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1274 dev_kfree_skb(skb);
1275 return -ENOMEM;
1276 }
1277
1278 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
1279
1280 return 0;
1281}
1282
1283/* Handle tx checksum */
1284static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1285{
1286 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1287 int ip_hdr_len = 0;
1288 u8 l4_proto;
1289
1290 if (skb->protocol == htons(ETH_P_IP)) {
1291 struct iphdr *ip4h = ip_hdr(skb);
1292
1293 /* Calculate IPv4 checksum and L4 checksum */
1294 ip_hdr_len = ip4h->ihl;
1295 l4_proto = ip4h->protocol;
1296 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1297 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1298
1299 /* Read l4_protocol from one of IPv6 extra headers */
1300 if (skb_network_header_len(skb) > 0)
1301 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1302 l4_proto = ip6h->nexthdr;
1303 } else
1304 return MVNETA_TX_L4_CSUM_NOT;
1305
1306 return mvneta_txq_desc_csum(skb_network_offset(skb),
1307 skb->protocol, ip_hdr_len, l4_proto);
1308 }
1309
1310 return MVNETA_TX_L4_CSUM_NOT;
1311}
1312
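As a concrete illustration of the two helpers above (editorial): for a plain TCP-over-IPv4 skb with no IP options, the network header normally starts at offset ETH_HLEN and ihl is 5, giving:

/* Worked example (editorial): TCP over IPv4, no IP options, network
 * header at offset 14 (ETH_HLEN).  mvneta_skb_tx_csum() then calls
 *   mvneta_txq_desc_csum(14, htons(ETH_P_IP), 5, IPPROTO_TCP)
 * which assembles
 *   command = (14 << MVNETA_TX_L3_OFF_SHIFT)
 *           | ( 5 << MVNETA_TX_IP_HLEN_SHIFT)
 *           | MVNETA_TXD_IP_CSUM
 *           | MVNETA_TX_L4_CSUM_FULL;
 * (on these little-endian SoCs swab16(ETH_P_IP) equals htons(ETH_P_IP),
 * so the IPv4 branch in mvneta_txq_desc_csum() is taken). */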
1313/* Returns rx queue pointer (find last set bit) according to causeRxTx
1314 * value
1315 */
1316static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
1317 u32 cause)
1318{
1319 int queue = fls(cause >> 8) - 1;
1320
1321 return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
1322}
1323
1324/* Drop packets received by the RXQ and free buffers */
1325static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1326 struct mvneta_rx_queue *rxq)
1327{
1328 int rx_done, i;
1329
1330 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1331 for (i = 0; i < rxq->size; i++) {
1332 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1333 struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
1334
1335 dev_kfree_skb_any(skb);
1336 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1337 rx_desc->data_size, DMA_FROM_DEVICE);
1338 }
1339
1340 if (rx_done)
1341 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1342}
1343
1344/* Main rx processing */
1345static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1346 struct mvneta_rx_queue *rxq)
1347{
1348 struct net_device *dev = pp->dev;
1349 int rx_done, rx_filled;
1350
1351 /* Get number of received packets */
1352 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1353
1354 if (rx_todo > rx_done)
1355 rx_todo = rx_done;
1356
1357 rx_done = 0;
1358 rx_filled = 0;
1359
1360 /* Fairness NAPI loop */
1361 while (rx_done < rx_todo) {
1362 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1363 struct sk_buff *skb;
1364 u32 rx_status;
1365 int rx_bytes, err;
1366
1367 prefetch(rx_desc);
1368 rx_done++;
1369 rx_filled++;
1370 rx_status = rx_desc->status;
1371 skb = (struct sk_buff *)rx_desc->buf_cookie;
1372
1373 if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
1374 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1375 dev->stats.rx_errors++;
1376 mvneta_rx_error(pp, rx_desc);
1377 mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
1378 (u32)skb);
1379 continue;
1380 }
1381
1382 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1383 rx_desc->data_size, DMA_FROM_DEVICE);
1384
1385 rx_bytes = rx_desc->data_size -
1386 (ETH_FCS_LEN + MVNETA_MH_SIZE);
1387 u64_stats_update_begin(&pp->rx_stats.syncp);
1388 pp->rx_stats.packets++;
1389 pp->rx_stats.bytes += rx_bytes;
1390 u64_stats_update_end(&pp->rx_stats.syncp);
1391
1392 /* Linux processing */
1393 skb_reserve(skb, MVNETA_MH_SIZE);
1394 skb_put(skb, rx_bytes);
1395
1396 skb->protocol = eth_type_trans(skb, dev);
1397
1398 mvneta_rx_csum(pp, rx_desc, skb);
1399
1400 napi_gro_receive(&pp->napi, skb);
1401
1402 /* Refill processing */
1403 err = mvneta_rx_refill(pp, rx_desc);
1404 if (err) {
1405 netdev_err(pp->dev, "Linux processing - Can't refill\n");
1406 rxq->missed++;
1407 rx_filled--;
1408 }
1409 }
1410
1411 /* Update rxq management counters */
1412 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
1413
1414 return rx_done;
1415}
1416
1417/* Handle tx fragmentation processing */
1418static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1419 struct mvneta_tx_queue *txq)
1420{
1421 struct mvneta_tx_desc *tx_desc;
1422 int i;
1423
1424 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1425 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1426 void *addr = page_address(frag->page.p) + frag->page_offset;
1427
1428 tx_desc = mvneta_txq_next_desc_get(txq);
1429 tx_desc->data_size = frag->size;
1430
1431 tx_desc->buf_phys_addr =
1432 dma_map_single(pp->dev->dev.parent, addr,
1433 tx_desc->data_size, DMA_TO_DEVICE);
1434
1435 if (dma_mapping_error(pp->dev->dev.parent,
1436 tx_desc->buf_phys_addr)) {
1437 mvneta_txq_desc_put(txq);
1438 goto error;
1439 }
1440
1441 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
1442 /* Last descriptor */
1443 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
1444
1445 txq->tx_skb[txq->txq_put_index] = skb;
1446
1447 mvneta_txq_inc_put(txq);
1448 } else {
1449 /* Descriptor in the middle: Not First, Not Last */
1450 tx_desc->command = 0;
1451
1452 txq->tx_skb[txq->txq_put_index] = NULL;
1453 mvneta_txq_inc_put(txq);
1454 }
1455 }
1456
1457 return 0;
1458
1459error:
1460 /* Release all descriptors that were used to map fragments of
1461 * this packet, as well as the corresponding DMA mappings
1462 */
1463 for (i = i - 1; i >= 0; i--) {
1464 tx_desc = txq->descs + i;
1465 dma_unmap_single(pp->dev->dev.parent,
1466 tx_desc->buf_phys_addr,
1467 tx_desc->data_size,
1468 DMA_TO_DEVICE);
1469 mvneta_txq_desc_put(txq);
1470 }
1471
1472 return -ENOMEM;
1473}
1474
1475/* Main tx processing */
1476static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1477{
1478 struct mvneta_port *pp = netdev_priv(dev);
1479 struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
1480 struct mvneta_tx_desc *tx_desc;
1481 struct netdev_queue *nq;
1482 int frags = 0;
1483 u32 tx_cmd;
1484
1485 if (!netif_running(dev))
1486 goto out;
1487
1488 frags = skb_shinfo(skb)->nr_frags + 1;
1489 nq = netdev_get_tx_queue(dev, txq_def);
1490
1491 /* Get a descriptor for the first part of the packet */
1492 tx_desc = mvneta_txq_next_desc_get(txq);
1493
1494 tx_cmd = mvneta_skb_tx_csum(pp, skb);
1495
1496 tx_desc->data_size = skb_headlen(skb);
1497
1498 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
1499 tx_desc->data_size,
1500 DMA_TO_DEVICE);
1501 if (unlikely(dma_mapping_error(dev->dev.parent,
1502 tx_desc->buf_phys_addr))) {
1503 mvneta_txq_desc_put(txq);
1504 frags = 0;
1505 goto out;
1506 }
1507
1508 if (frags == 1) {
1509 /* First and Last descriptor */
1510 tx_cmd |= MVNETA_TXD_FLZ_DESC;
1511 tx_desc->command = tx_cmd;
1512 txq->tx_skb[txq->txq_put_index] = skb;
1513 mvneta_txq_inc_put(txq);
1514 } else {
1515 /* First but not Last */
1516 tx_cmd |= MVNETA_TXD_F_DESC;
1517 txq->tx_skb[txq->txq_put_index] = NULL;
1518 mvneta_txq_inc_put(txq);
1519 tx_desc->command = tx_cmd;
1520 /* Continue with other skb fragments */
1521 if (mvneta_tx_frag_process(pp, skb, txq)) {
1522 dma_unmap_single(dev->dev.parent,
1523 tx_desc->buf_phys_addr,
1524 tx_desc->data_size,
1525 DMA_TO_DEVICE);
1526 mvneta_txq_desc_put(txq);
1527 frags = 0;
1528 goto out;
1529 }
1530 }
1531
1532 txq->count += frags;
1533 mvneta_txq_pend_desc_add(pp, txq, frags);
1534
1535 if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
1536 netif_tx_stop_queue(nq);
1537
1538out:
1539 if (frags > 0) {
1540 u64_stats_update_begin(&pp->tx_stats.syncp);
1541 pp->tx_stats.packets++;
1542 pp->tx_stats.bytes += skb->len;
1543 u64_stats_update_end(&pp->tx_stats.syncp);
1544
1545 } else {
1546 dev->stats.tx_dropped++;
1547 dev_kfree_skb_any(skb);
1548 }
1549
1550 if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
1551 mvneta_txq_done(pp, txq);
1552
1553 /* If after calling mvneta_txq_done, count equals
1554 * frags, we need to set the timer
1555 */
1556 if (txq->count == frags && frags > 0)
1557 mvneta_add_tx_done_timer(pp);
1558
1559 return NETDEV_TX_OK;
1560}
1561
1562
1563/* Free tx resources when resetting a port */
1564static void mvneta_txq_done_force(struct mvneta_port *pp,
1565 struct mvneta_tx_queue *txq)
1566
1567{
1568 int tx_done = txq->count;
1569
1570 mvneta_txq_bufs_free(pp, txq, tx_done);
1571
1572 /* reset txq */
1573 txq->count = 0;
1574 txq->txq_put_index = 0;
1575 txq->txq_get_index = 0;
1576}
1577
1578/* handle tx done - called from tx done timer callback */
1579static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
1580 int *tx_todo)
1581{
1582 struct mvneta_tx_queue *txq;
1583 u32 tx_done = 0;
1584 struct netdev_queue *nq;
1585
1586 *tx_todo = 0;
1587 while (cause_tx_done != 0) {
1588 txq = mvneta_tx_done_policy(pp, cause_tx_done);
1589 if (!txq)
1590 break;
1591
1592 nq = netdev_get_tx_queue(pp->dev, txq->id);
1593 __netif_tx_lock(nq, smp_processor_id());
1594
1595 if (txq->count) {
1596 tx_done += mvneta_txq_done(pp, txq);
1597 *tx_todo += txq->count;
1598 }
1599
1600 __netif_tx_unlock(nq);
1601 cause_tx_done &= ~((1 << txq->id));
1602 }
1603
1604 return tx_done;
1605}
1606
1607/* Compute the CRC-8 of the specified address, using a hardware-specific
1608 * algorithm (per the HW spec) that differs from the generic CRC-8 algorithm
1609 */
1610static int mvneta_addr_crc(unsigned char *addr)
1611{
1612 int crc = 0;
1613 int i;
1614
1615 for (i = 0; i < ETH_ALEN; i++) {
1616 int j;
1617
1618 crc = (crc ^ addr[i]) << 8;
1619 for (j = 7; j >= 0; j--) {
1620 if (crc & (0x100 << j))
1621 crc ^= 0x107 << j;
1622 }
1623 }
1624
1625 return crc;
1626}
1627
1628/* This method controls the network device special MAC multicast support.
1629 * The Special Multicast Table for MAC addresses supports MAC addresses of
1630 * the form 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1631 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1632 * Table entries in the DA-Filter table. This method sets the appropriate
1633 * entry in the Special Multicast Table.
1634 */
1635static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
1636 unsigned char last_byte,
1637 int queue)
1638{
1639 unsigned int smc_table_reg;
1640 unsigned int tbl_offset;
1641 unsigned int reg_offset;
1642
1643 /* Register offset from SMC table base */
1644 tbl_offset = (last_byte / 4);
1645 /* Entry offset within the above reg */
1646 reg_offset = last_byte % 4;
1647
1648 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
1649 + tbl_offset * 4));
1650
1651 if (queue == -1)
1652 smc_table_reg &= ~(0xff << (8 * reg_offset));
1653 else {
1654 smc_table_reg &= ~(0xff << (8 * reg_offset));
1655 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1656 }
1657
1658 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
1659 smc_table_reg);
1660}
1661
1662/* This method controls the network device Other MAC multicast support.
1663 * The Other Multicast Table is used for multicast of another type.
1664 * A CRC-8 is used as an index to the Other Multicast Table entries
1665 * in the DA-Filter table.
1666 * The method gets the CRC-8 value from the calling routine and
1667 * sets the appropriate Other Multicast Table entry according to the
1668 * specified CRC-8.
1669 */
1670static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
1671 unsigned char crc8,
1672 int queue)
1673{
1674 unsigned int omc_table_reg;
1675 unsigned int tbl_offset;
1676 unsigned int reg_offset;
1677
1678 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
1679 reg_offset = crc8 % 4; /* Entry offset within the above reg */
1680
1681 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
1682
1683 if (queue == -1) {
1684 /* Clear accepts frame bit at specified Other DA table entry */
1685 omc_table_reg &= ~(0xff << (8 * reg_offset));
1686 } else {
1687 omc_table_reg &= ~(0xff << (8 * reg_offset));
1688 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1689 }
1690
1691 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
1692}
1693
1694/* The network device supports multicast using two tables:
1695 * 1) Special Multicast Table for MAC addresses of the form
1696 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1697 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1698 * Table entries in the DA-Filter table.
1699 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
1700 * is used as an index to the Other Multicast Table entries in the
1701 * DA-Filter table.
1702 */
1703static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
1704 int queue)
1705{
1706 unsigned char crc_result = 0;
1707
1708 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
1709 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
1710 return 0;
1711 }
1712
1713 crc_result = mvneta_addr_crc(p_addr);
1714 if (queue == -1) {
1715 if (pp->mcast_count[crc_result] == 0) {
1716 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
1717 crc_result);
1718 return -EINVAL;
1719 }
1720
1721 pp->mcast_count[crc_result]--;
1722 if (pp->mcast_count[crc_result] != 0) {
1723 netdev_info(pp->dev,
1724 "After delete there are %d valid Mcast for crc8=0x%02x\n",
1725 pp->mcast_count[crc_result], crc_result);
1726 return -EINVAL;
1727 }
1728 } else
1729 pp->mcast_count[crc_result]++;
1730
1731 mvneta_set_other_mcast_addr(pp, crc_result, queue);
1732
1733 return 0;
1734}
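
The filtering logic above can be exercised outside the kernel. The sketch below is an illustration only, not driver code: it mirrors the CRC-8 loop of mvneta_addr_crc() and the table indexing done by mvneta_set_special_mcast_addr()/mvneta_set_other_mcast_addr(), and prints which DA-Filter table slot a given multicast MAC would land in. All names in it (hw_crc8, the sample addresses) are made up for the example.

#include <stdio.h>

/* Illustrative user-space sketch, not driver code: mirror the CRC-8 of
 * mvneta_addr_crc() and report the DA-Filter table slot and entry byte
 * that mvneta_mcast_addr_set() would program for a multicast address.
 */
static int hw_crc8(const unsigned char *addr)
{
	int crc = 0, i, j;

	for (i = 0; i < 6; i++) {
		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--)
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
	}
	return crc;
}

int main(void)
{
	unsigned char special[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x42 };
	unsigned char other[6]   = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };
	int queue = 0;	/* the driver would use rxq_def here */
	int index = special[5];
	int crc = hw_crc8(other);

	/* 01-00-5E-00-00-XX -> Special Multicast Table, indexed by XX */
	printf("special: reg %d, byte %d, entry 0x%02x\n",
	       index / 4, index % 4, 0x01 | (queue << 1));

	/* anything else -> Other Multicast Table, indexed by the CRC-8 */
	printf("other:   reg %d, byte %d, entry 0x%02x (crc8 0x%02x)\n",
	       crc / 4, crc % 4, 0x01 | (queue << 1), crc);
	return 0;
}

As the 0x01 | (queue << 1) expression in the functions above shows, each one-byte table entry carries an "accept" flag in bit 0 and the target RX queue in the bits above it.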
1735
1736/* Configure the filtering mode of the Ethernet port */
1737static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
1738 int is_promisc)
1739{
1740 u32 port_cfg_reg, val;
1741
1742 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
1743
1744 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
1745
1746 /* Set / Clear UPM bit in port configuration register */
1747 if (is_promisc) {
1748 /* Accept all Unicast addresses */
1749 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
1750 val |= MVNETA_FORCE_UNI;
1751 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
1752 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
1753 } else {
1754 /* Reject all Unicast addresses */
1755 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
1756 val &= ~MVNETA_FORCE_UNI;
1757 }
1758
1759 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
1760 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
1761}
1762
1763/* register unicast and multicast addresses */
1764static void mvneta_set_rx_mode(struct net_device *dev)
1765{
1766 struct mvneta_port *pp = netdev_priv(dev);
1767 struct netdev_hw_addr *ha;
1768
1769 if (dev->flags & IFF_PROMISC) {
1770 /* Accept all: Multicast + Unicast */
1771 mvneta_rx_unicast_promisc_set(pp, 1);
1772 mvneta_set_ucast_table(pp, rxq_def);
1773 mvneta_set_special_mcast_table(pp, rxq_def);
1774 mvneta_set_other_mcast_table(pp, rxq_def);
1775 } else {
1776 /* Accept single Unicast */
1777 mvneta_rx_unicast_promisc_set(pp, 0);
1778 mvneta_set_ucast_table(pp, -1);
1779 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
1780
1781 if (dev->flags & IFF_ALLMULTI) {
1782 /* Accept all multicast */
1783 mvneta_set_special_mcast_table(pp, rxq_def);
1784 mvneta_set_other_mcast_table(pp, rxq_def);
1785 } else {
1786 /* Accept only initialized multicast */
1787 mvneta_set_special_mcast_table(pp, -1);
1788 mvneta_set_other_mcast_table(pp, -1);
1789
1790 if (!netdev_mc_empty(dev)) {
1791 netdev_for_each_mc_addr(ha, dev) {
1792 mvneta_mcast_addr_set(pp, ha->addr,
1793 rxq_def);
1794 }
1795 }
1796 }
1797 }
1798}
1799
1800/* Interrupt handling - the callback for request_irq() */
1801static irqreturn_t mvneta_isr(int irq, void *dev_id)
1802{
1803 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
1804
1805 /* Mask all interrupts */
1806 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1807
1808 napi_schedule(&pp->napi);
1809
1810 return IRQ_HANDLED;
1811}
1812
1813/* NAPI handler
1814 * Bits 0 - 7 of the causeRxTx register indicate that packets were
1815 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
1816 * Bits 8 - 15 of the causeRxTx register indicate that packets were
1817 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
1818 * Each CPU has its own causeRxTx register
1819 */
1820static int mvneta_poll(struct napi_struct *napi, int budget)
1821{
1822 int rx_done = 0;
1823 u32 cause_rx_tx;
1824 unsigned long flags;
1825 struct mvneta_port *pp = netdev_priv(napi->dev);
1826
1827 if (!netif_running(pp->dev)) {
1828 napi_complete(napi);
1829 return rx_done;
1830 }
1831
1832 /* Read cause register */
1833 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
1834 MVNETA_RX_INTR_MASK(rxq_number);
1835
1836 /* For the case where the last mvneta_poll did not process all
1837 * RX packets
1838 */
1839 cause_rx_tx |= pp->cause_rx_tx;
1840 if (rxq_number > 1) {
1841 while ((cause_rx_tx != 0) && (budget > 0)) {
1842 int count;
1843 struct mvneta_rx_queue *rxq;
1844 /* get rx queue number from cause_rx_tx */
1845 rxq = mvneta_rx_policy(pp, cause_rx_tx);
1846 if (!rxq)
1847 break;
1848
1849 /* process the packet in that rx queue */
1850 count = mvneta_rx(pp, budget, rxq);
1851 rx_done += count;
1852 budget -= count;
1853 if (budget > 0) {
1854				/* clear this rx queue's bit in
1855				 * the cause rx tx register, so
1856				 * that the next iteration will
1857				 * find the next rx queue with
1858				 * received packets
1859				 */
1860 cause_rx_tx &= ~((1 << rxq->id) << 8);
1861 }
1862 }
1863 } else {
1864 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
1865 budget -= rx_done;
1866 }
1867
1868 if (budget > 0) {
1869 cause_rx_tx = 0;
1870 napi_complete(napi);
1871 local_irq_save(flags);
1872 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1873 MVNETA_RX_INTR_MASK(rxq_number));
1874 local_irq_restore(flags);
1875 }
1876
1877 pp->cause_rx_tx = cause_rx_tx;
1878 return rx_done;
1879}
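
The causeRxTx layout described in the comment at the top of mvneta_poll() can be pictured with a tiny standalone sketch. It is an illustration only; the CAUSE_* names below are invented for the example, the driver uses its own MVNETA_* masks.

#include <stdio.h>

/* Illustration of the causeRxTx layout used by mvneta_poll(): bits 0-7
 * flag TX queues with sent descriptors, bits 8-15 flag RX queues with
 * received packets.
 */
#define CAUSE_TXQ_BIT(q)	(1u << (q))
#define CAUSE_RXQ_BIT(q)	(1u << ((q) + 8))
#define CAUSE_RX_MASK		0xff00u
#define CAUSE_TX_MASK		0x00ffu

int main(void)
{
	unsigned int cause = CAUSE_RXQ_BIT(0) | CAUSE_RXQ_BIT(3) | CAUSE_TXQ_BIT(1);

	printf("pending rx queues: 0x%02x\n", (cause & CAUSE_RX_MASK) >> 8);
	printf("pending tx queues: 0x%02x\n", cause & CAUSE_TX_MASK);

	/* serve RX queue 3, then clear its bit as mvneta_poll() does */
	cause &= ~((1u << 3) << 8);
	printf("after serving rxq 3: 0x%04x\n", cause);
	return 0;
}

This is why the poll routine masks the cause register with MVNETA_RX_INTR_MASK(rxq_number) and clears (1 << rxq->id) << 8 once a queue has been serviced.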
1880
1881/* tx done timer callback */
1882static void mvneta_tx_done_timer_callback(unsigned long data)
1883{
1884 struct net_device *dev = (struct net_device *)data;
1885 struct mvneta_port *pp = netdev_priv(dev);
1886 int tx_done = 0, tx_todo = 0;
1887
1888 if (!netif_running(dev))
1889		return;
1890
1891 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
1892
1893 tx_done = mvneta_tx_done_gbe(pp,
1894 (((1 << txq_number) - 1) &
1895 MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
1896 &tx_todo);
1897 if (tx_todo > 0)
1898 mvneta_add_tx_done_timer(pp);
1899}
1900
1901/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
1902static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
1903 int num)
1904{
1905 struct net_device *dev = pp->dev;
1906 int i;
1907
1908 for (i = 0; i < num; i++) {
1909 struct sk_buff *skb;
1910 struct mvneta_rx_desc *rx_desc;
1911 unsigned long phys_addr;
1912
1913 skb = dev_alloc_skb(pp->pkt_size);
1914 if (!skb) {
1915 netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
1916 __func__, rxq->id, i, num);
1917 break;
1918 }
1919
1920 rx_desc = rxq->descs + i;
1921 memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
1922 phys_addr = dma_map_single(dev->dev.parent, skb->head,
1923 MVNETA_RX_BUF_SIZE(pp->pkt_size),
1924 DMA_FROM_DEVICE);
1925 if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
1926 dev_kfree_skb(skb);
1927 break;
1928 }
1929
1930 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
1931 }
1932
1933 /* Add this number of RX descriptors as non occupied (ready to
1934 * get packets)
1935 */
1936 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
1937
1938 return i;
1939}
1940
1941/* Free all packets pending transmit from all TXQs and reset TX port */
1942static void mvneta_tx_reset(struct mvneta_port *pp)
1943{
1944 int queue;
1945
1946	/* free the skbs in the HAL tx ring */
1947 for (queue = 0; queue < txq_number; queue++)
1948 mvneta_txq_done_force(pp, &pp->txqs[queue]);
1949
1950 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1951 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1952}
1953
1954static void mvneta_rx_reset(struct mvneta_port *pp)
1955{
1956 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1957 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1958}
1959
1960/* Rx/Tx queue initialization/cleanup methods */
1961
1962/* Create a specified RX queue */
1963static int mvneta_rxq_init(struct mvneta_port *pp,
1964 struct mvneta_rx_queue *rxq)
1965
1966{
1967 rxq->size = pp->rx_ring_size;
1968
1969 /* Allocate memory for RX descriptors */
1970 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
1971 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
1972 &rxq->descs_phys, GFP_KERNEL);
1973 if (rxq->descs == NULL) {
1974 netdev_err(pp->dev,
1975 "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
1976 rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
1977 rxq->size);
1978 return -ENOMEM;
1979 }
1980
1981 BUG_ON(rxq->descs !=
1982 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
1983
1984 rxq->last_desc = rxq->size - 1;
1985
1986 /* Set Rx descriptors queue starting address */
1987 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
1988 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
1989
1990 /* Set Offset */
1991 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
1992
1993 /* Set coalescing pkts and time */
1994 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
1995 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
1996
1997 /* Fill RXQ with buffers from RX pool */
1998 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
1999 mvneta_rxq_bm_disable(pp, rxq);
2000 mvneta_rxq_fill(pp, rxq, rxq->size);
2001
2002 return 0;
2003}
2004
2005/* Cleanup Rx queue */
2006static void mvneta_rxq_deinit(struct mvneta_port *pp,
2007 struct mvneta_rx_queue *rxq)
2008{
2009 mvneta_rxq_drop_pkts(pp, rxq);
2010
2011 if (rxq->descs)
2012 dma_free_coherent(pp->dev->dev.parent,
2013 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2014 rxq->descs,
2015 rxq->descs_phys);
2016
2017 rxq->descs = NULL;
2018 rxq->last_desc = 0;
2019 rxq->next_desc_to_proc = 0;
2020 rxq->descs_phys = 0;
2021}
2022
2023/* Create and initialize a tx queue */
2024static int mvneta_txq_init(struct mvneta_port *pp,
2025 struct mvneta_tx_queue *txq)
2026{
2027 txq->size = pp->tx_ring_size;
2028
2029 /* Allocate memory for TX descriptors */
2030 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2031 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2032 &txq->descs_phys, GFP_KERNEL);
2033 if (txq->descs == NULL) {
2034 netdev_err(pp->dev,
2035 "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
2036 txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
2037 txq->size);
2038 return -ENOMEM;
2039 }
2040
2041 /* Make sure descriptor address is cache line size aligned */
2042 BUG_ON(txq->descs !=
2043 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2044
2045 txq->last_desc = txq->size - 1;
2046
2047 /* Set maximum bandwidth for enabled TXQs */
2048 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2049 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2050
2051 /* Set Tx descriptors queue starting address */
2052 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2053 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2054
2055 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2056 if (txq->tx_skb == NULL) {
2057 dma_free_coherent(pp->dev->dev.parent,
2058 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2059 txq->descs, txq->descs_phys);
2060 return -ENOMEM;
2061 }
2062 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2063
2064 return 0;
2065}
2066
2067/* Free the resources allocated for a tx queue */
2068static void mvneta_txq_deinit(struct mvneta_port *pp,
2069 struct mvneta_tx_queue *txq)
2070{
2071 kfree(txq->tx_skb);
2072
2073 if (txq->descs)
2074 dma_free_coherent(pp->dev->dev.parent,
2075 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2076 txq->descs, txq->descs_phys);
2077
2078 txq->descs = NULL;
2079 txq->last_desc = 0;
2080 txq->next_desc_to_proc = 0;
2081 txq->descs_phys = 0;
2082
2083 /* Set minimum bandwidth for disabled TXQs */
2084 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2085 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2086
2087 /* Set Tx descriptors queue starting address and size */
2088 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2089 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2090}
2091
2092/* Cleanup all Tx queues */
2093static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2094{
2095 int queue;
2096
2097 for (queue = 0; queue < txq_number; queue++)
2098 mvneta_txq_deinit(pp, &pp->txqs[queue]);
2099}
2100
2101/* Cleanup all Rx queues */
2102static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2103{
2104 int queue;
2105
2106 for (queue = 0; queue < rxq_number; queue++)
2107 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
2108}
2109
2110
2111/* Init all Rx queues */
2112static int mvneta_setup_rxqs(struct mvneta_port *pp)
2113{
2114 int queue;
2115
2116 for (queue = 0; queue < rxq_number; queue++) {
2117 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
2118 if (err) {
2119 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2120 __func__, queue);
2121 mvneta_cleanup_rxqs(pp);
2122 return err;
2123 }
2124 }
2125
2126 return 0;
2127}
2128
2129/* Init all tx queues */
2130static int mvneta_setup_txqs(struct mvneta_port *pp)
2131{
2132 int queue;
2133
2134 for (queue = 0; queue < txq_number; queue++) {
2135 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2136 if (err) {
2137 netdev_err(pp->dev, "%s: can't create txq=%d\n",
2138 __func__, queue);
2139 mvneta_cleanup_txqs(pp);
2140 return err;
2141 }
2142 }
2143
2144 return 0;
2145}
2146
2147static void mvneta_start_dev(struct mvneta_port *pp)
2148{
2149 mvneta_max_rx_size_set(pp, pp->pkt_size);
2150 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2151
2152 /* start the Rx/Tx activity */
2153 mvneta_port_enable(pp);
2154
2155 /* Enable polling on the port */
2156 napi_enable(&pp->napi);
2157
2158 /* Unmask interrupts */
2159 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2160 MVNETA_RX_INTR_MASK(rxq_number));
2161
2162 phy_start(pp->phy_dev);
2163 netif_tx_start_all_queues(pp->dev);
2164}
2165
2166static void mvneta_stop_dev(struct mvneta_port *pp)
2167{
2168 phy_stop(pp->phy_dev);
2169
2170 napi_disable(&pp->napi);
2171
2172 netif_carrier_off(pp->dev);
2173
2174 mvneta_port_down(pp);
2175 netif_tx_stop_all_queues(pp->dev);
2176
2177 /* Stop the port activity */
2178 mvneta_port_disable(pp);
2179
2180 /* Clear all ethernet port interrupts */
2181 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2182 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2183
2184 /* Mask all ethernet port interrupts */
2185 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2186 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2187 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2188
2189 mvneta_tx_reset(pp);
2190 mvneta_rx_reset(pp);
2191}
2192
2193/* tx timeout callback - display a message and stop/start the network device */
2194static void mvneta_tx_timeout(struct net_device *dev)
2195{
2196 struct mvneta_port *pp = netdev_priv(dev);
2197
2198 netdev_info(dev, "tx timeout\n");
2199 mvneta_stop_dev(pp);
2200 mvneta_start_dev(pp);
2201}
2202
2203/* Check the MTU and return the valid (possibly adjusted) value, or -EINVAL */
2204static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2205{
2206 if (mtu < 68) {
2207 netdev_err(dev, "cannot change mtu to less than 68\n");
2208 return -EINVAL;
2209 }
2210
2211 /* 9676 == 9700 - 20 and rounding to 8 */
2212 if (mtu > 9676) {
2213 netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
2214 mtu = 9676;
2215 }
2216
2217 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2218 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2219 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2220 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
2221 }
2222
2223 return mtu;
2224}
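
To make the rounding rule concrete, here is a small standalone sketch. It is an illustration, not driver code; pkt_size() is a made-up stand-in for the MVNETA_RX_PKT_SIZE() macro defined earlier in the file, assumed here to add a fixed Ethernet overhead.

#include <stdio.h>

/* Illustration of the MTU checks above: reject values below 68, clamp
 * values above 9676, and force the derived RX packet size onto an
 * 8-byte boundary.  pkt_size() is a hypothetical stand-in for
 * MVNETA_RX_PKT_SIZE().
 */
static int pkt_size(int mtu)
{
	return mtu + 18;		/* assumed Ethernet header + FCS overhead */
}

static int align8(int x)
{
	return (x + 7) & ~7;		/* same rounding as the kernel's ALIGN(x, 8) */
}

int main(void)
{
	int mtu = 9700;

	if (mtu < 68)
		return 1;			/* the driver returns -EINVAL here */
	if (mtu > 9676)
		mtu = 9676;
	if (pkt_size(mtu) % 8)
		mtu = align8(pkt_size(mtu));	/* mirrors the adjustment above */
	printf("adjusted mtu: %d\n", mtu);
	return 0;
}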
2225
2226/* Change the device mtu */
2227static int mvneta_change_mtu(struct net_device *dev, int mtu)
2228{
2229 struct mvneta_port *pp = netdev_priv(dev);
2230 int ret;
2231
2232 mtu = mvneta_check_mtu_valid(dev, mtu);
2233 if (mtu < 0)
2234 return -EINVAL;
2235
2236 dev->mtu = mtu;
2237
2238 if (!netif_running(dev))
2239 return 0;
2240
2241 /* The interface is running, so we have to force a
2242 * reallocation of the RXQs
2243 */
2244 mvneta_stop_dev(pp);
2245
2246 mvneta_cleanup_txqs(pp);
2247 mvneta_cleanup_rxqs(pp);
2248
2249 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2250
2251 ret = mvneta_setup_rxqs(pp);
2252 if (ret) {
2253 netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
2254 return ret;
2255 }
2256
2257 mvneta_setup_txqs(pp);
2258
2259 mvneta_start_dev(pp);
2260 mvneta_port_up(pp);
2261
2262 return 0;
2263}
2264
2265/* Handle setting mac address */
2266static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2267{
2268 struct mvneta_port *pp = netdev_priv(dev);
2269	u8 *mac = addr + 2;	/* skip sa_family, point at sa_data */
2270 int i;
2271
2272 if (netif_running(dev))
2273 return -EBUSY;
2274
2275 /* Remove previous address table entry */
2276 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2277
2278 /* Set new addr in hw */
2279 mvneta_mac_addr_set(pp, mac, rxq_def);
2280
2281 /* Set addr in the device */
2282 for (i = 0; i < ETH_ALEN; i++)
2283 dev->dev_addr[i] = mac[i];
2284
2285 return 0;
2286}
2287
2288static void mvneta_adjust_link(struct net_device *ndev)
2289{
2290 struct mvneta_port *pp = netdev_priv(ndev);
2291 struct phy_device *phydev = pp->phy_dev;
2292 int status_change = 0;
2293
2294 if (phydev->link) {
2295 if ((pp->speed != phydev->speed) ||
2296 (pp->duplex != phydev->duplex)) {
2297 u32 val;
2298
2299 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2300 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2301 MVNETA_GMAC_CONFIG_GMII_SPEED |
2302 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
2303
2304 if (phydev->duplex)
2305 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2306
2307 if (phydev->speed == SPEED_1000)
2308 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2309 else
2310 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2311
2312 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2313
2314 pp->duplex = phydev->duplex;
2315 pp->speed = phydev->speed;
2316 }
2317 }
2318
2319 if (phydev->link != pp->link) {
2320 if (!phydev->link) {
2321 pp->duplex = -1;
2322 pp->speed = 0;
2323 }
2324
2325 pp->link = phydev->link;
2326 status_change = 1;
2327 }
2328
2329 if (status_change) {
2330 if (phydev->link) {
2331 u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2332 val |= (MVNETA_GMAC_FORCE_LINK_PASS |
2333 MVNETA_GMAC_FORCE_LINK_DOWN);
2334 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2335 mvneta_port_up(pp);
2336 netdev_info(pp->dev, "link up\n");
2337 } else {
2338 mvneta_port_down(pp);
2339 netdev_info(pp->dev, "link down\n");
2340 }
2341 }
2342}
2343
2344static int mvneta_mdio_probe(struct mvneta_port *pp)
2345{
2346 struct phy_device *phy_dev;
2347
2348 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2349 pp->phy_interface);
2350 if (!phy_dev) {
2351 netdev_err(pp->dev, "could not find the PHY\n");
2352 return -ENODEV;
2353 }
2354
2355 phy_dev->supported &= PHY_GBIT_FEATURES;
2356 phy_dev->advertising = phy_dev->supported;
2357
2358 pp->phy_dev = phy_dev;
2359 pp->link = 0;
2360 pp->duplex = 0;
2361 pp->speed = 0;
2362
2363 return 0;
2364}
2365
2366static void mvneta_mdio_remove(struct mvneta_port *pp)
2367{
2368 phy_disconnect(pp->phy_dev);
2369 pp->phy_dev = NULL;
2370}
2371
2372static int mvneta_open(struct net_device *dev)
2373{
2374 struct mvneta_port *pp = netdev_priv(dev);
2375 int ret;
2376
2377 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2378
2379 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2380
2381 ret = mvneta_setup_rxqs(pp);
2382 if (ret)
2383 return ret;
2384
2385 ret = mvneta_setup_txqs(pp);
2386 if (ret)
2387 goto err_cleanup_rxqs;
2388
2389 /* Connect to port interrupt line */
2390 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
2391 MVNETA_DRIVER_NAME, pp);
2392 if (ret) {
2393 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
2394 goto err_cleanup_txqs;
2395 }
2396
2397	/* By default, the link is down */
2398 netif_carrier_off(pp->dev);
2399
2400 ret = mvneta_mdio_probe(pp);
2401 if (ret < 0) {
2402 netdev_err(dev, "cannot probe MDIO bus\n");
2403 goto err_free_irq;
2404 }
2405
2406 mvneta_start_dev(pp);
2407
2408 return 0;
2409
2410err_free_irq:
2411 free_irq(pp->dev->irq, pp);
2412err_cleanup_txqs:
2413 mvneta_cleanup_txqs(pp);
2414err_cleanup_rxqs:
2415 mvneta_cleanup_rxqs(pp);
2416 return ret;
2417}
2418
2419/* Stop the port, free port interrupt line */
2420static int mvneta_stop(struct net_device *dev)
2421{
2422 struct mvneta_port *pp = netdev_priv(dev);
2423
2424 mvneta_stop_dev(pp);
2425 mvneta_mdio_remove(pp);
2426 free_irq(dev->irq, pp);
2427 mvneta_cleanup_rxqs(pp);
2428 mvneta_cleanup_txqs(pp);
2429 del_timer(&pp->tx_done_timer);
2430 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
2431
2432 return 0;
2433}
2434
2435/* Ethtool methods */
2436
2437/* Get settings (phy address, speed) for ethtool */
2438int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2439{
2440 struct mvneta_port *pp = netdev_priv(dev);
2441
2442 if (!pp->phy_dev)
2443 return -ENODEV;
2444
2445 return phy_ethtool_gset(pp->phy_dev, cmd);
2446}
2447
2448/* Set settings (phy address, speed) for ethtool */
2449int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2450{
2451 struct mvneta_port *pp = netdev_priv(dev);
2452
2453 if (!pp->phy_dev)
2454 return -ENODEV;
2455
2456 return phy_ethtool_sset(pp->phy_dev, cmd);
2457}
2458
2459/* Set interrupt coalescing for ethtool */
2460static int mvneta_ethtool_set_coalesce(struct net_device *dev,
2461 struct ethtool_coalesce *c)
2462{
2463 struct mvneta_port *pp = netdev_priv(dev);
2464 int queue;
2465
2466 for (queue = 0; queue < rxq_number; queue++) {
2467 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2468 rxq->time_coal = c->rx_coalesce_usecs;
2469 rxq->pkts_coal = c->rx_max_coalesced_frames;
2470 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2471 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2472 }
2473
2474 for (queue = 0; queue < txq_number; queue++) {
2475 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2476 txq->done_pkts_coal = c->tx_max_coalesced_frames;
2477 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2478 }
2479
2480 return 0;
2481}
2482
2483/* Get interrupt coalescing for ethtool */
2484static int mvneta_ethtool_get_coalesce(struct net_device *dev,
2485 struct ethtool_coalesce *c)
2486{
2487 struct mvneta_port *pp = netdev_priv(dev);
2488
2489 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
2490 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
2491
2492 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
2493 return 0;
2494}
2495
2496
2497static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
2498 struct ethtool_drvinfo *drvinfo)
2499{
2500 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
2501 sizeof(drvinfo->driver));
2502 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
2503 sizeof(drvinfo->version));
2504 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
2505 sizeof(drvinfo->bus_info));
2506}
2507
2508
2509static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
2510 struct ethtool_ringparam *ring)
2511{
2512 struct mvneta_port *pp = netdev_priv(netdev);
2513
2514 ring->rx_max_pending = MVNETA_MAX_RXD;
2515 ring->tx_max_pending = MVNETA_MAX_TXD;
2516 ring->rx_pending = pp->rx_ring_size;
2517 ring->tx_pending = pp->tx_ring_size;
2518}
2519
2520static int mvneta_ethtool_set_ringparam(struct net_device *dev,
2521 struct ethtool_ringparam *ring)
2522{
2523 struct mvneta_port *pp = netdev_priv(dev);
2524
2525 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
2526 return -EINVAL;
2527 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
2528 ring->rx_pending : MVNETA_MAX_RXD;
2529 pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
2530 ring->tx_pending : MVNETA_MAX_TXD;
2531
2532 if (netif_running(dev)) {
2533 mvneta_stop(dev);
2534 if (mvneta_open(dev)) {
2535 netdev_err(dev,
2536 "error on opening device after ring param change\n");
2537 return -ENOMEM;
2538 }
2539 }
2540
2541 return 0;
2542}
2543
2544static const struct net_device_ops mvneta_netdev_ops = {
2545 .ndo_open = mvneta_open,
2546 .ndo_stop = mvneta_stop,
2547 .ndo_start_xmit = mvneta_tx,
2548 .ndo_set_rx_mode = mvneta_set_rx_mode,
2549 .ndo_set_mac_address = mvneta_set_mac_addr,
2550 .ndo_change_mtu = mvneta_change_mtu,
2551 .ndo_tx_timeout = mvneta_tx_timeout,
2552 .ndo_get_stats64 = mvneta_get_stats64,
2553};
2554
2555const struct ethtool_ops mvneta_eth_tool_ops = {
2556 .get_link = ethtool_op_get_link,
2557 .get_settings = mvneta_ethtool_get_settings,
2558 .set_settings = mvneta_ethtool_set_settings,
2559 .set_coalesce = mvneta_ethtool_set_coalesce,
2560 .get_coalesce = mvneta_ethtool_get_coalesce,
2561 .get_drvinfo = mvneta_ethtool_get_drvinfo,
2562 .get_ringparam = mvneta_ethtool_get_ringparam,
2563 .set_ringparam = mvneta_ethtool_set_ringparam,
2564};
2565
2566/* Initialize hw */
2567static int __devinit mvneta_init(struct mvneta_port *pp, int phy_addr)
2568{
2569 int queue;
2570
2571 /* Disable port */
2572 mvneta_port_disable(pp);
2573
2574 /* Set port default values */
2575 mvneta_defaults_set(pp);
2576
2577 pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
2578 GFP_KERNEL);
2579 if (!pp->txqs)
2580 return -ENOMEM;
2581
2582 /* Initialize TX descriptor rings */
2583 for (queue = 0; queue < txq_number; queue++) {
2584 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2585 txq->id = queue;
2586 txq->size = pp->tx_ring_size;
2587 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
2588 }
2589
2590 pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
2591 GFP_KERNEL);
2592 if (!pp->rxqs) {
2593 kfree(pp->txqs);
2594 return -ENOMEM;
2595 }
2596
2597 /* Create Rx descriptor rings */
2598 for (queue = 0; queue < rxq_number; queue++) {
2599 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2600 rxq->id = queue;
2601 rxq->size = pp->rx_ring_size;
2602 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
2603 rxq->time_coal = MVNETA_RX_COAL_USEC;
2604 }
2605
2606 return 0;
2607}
2608
2609static void mvneta_deinit(struct mvneta_port *pp)
2610{
2611 kfree(pp->txqs);
2612 kfree(pp->rxqs);
2613}
2614
2615/* platform glue: initialize decoding windows */
2616static void __devinit
2617mvneta_conf_mbus_windows(struct mvneta_port *pp,
2618 const struct mbus_dram_target_info *dram)
2619{
2620 u32 win_enable;
2621 u32 win_protect;
2622 int i;
2623
2624 for (i = 0; i < 6; i++) {
2625 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
2626 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
2627
2628 if (i < 4)
2629 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
2630 }
2631
2632 win_enable = 0x3f;
2633 win_protect = 0;
2634
2635 for (i = 0; i < dram->num_cs; i++) {
2636 const struct mbus_dram_window *cs = dram->cs + i;
2637 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
2638 (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
2639
2640 mvreg_write(pp, MVNETA_WIN_SIZE(i),
2641 (cs->size - 1) & 0xffff0000);
2642
2643 win_enable &= ~(1 << i);
2644 win_protect |= 3 << (2 * i);
2645 }
2646
2647 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
2648}
2649
2650/* Power up the port */
2651static void __devinit mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
2652{
2653 u32 val;
2654
2655 /* MAC Cause register should be cleared */
2656 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
2657
2658 if (phy_mode == PHY_INTERFACE_MODE_SGMII)
2659 mvneta_port_sgmii_config(pp);
2660
2661 mvneta_gmac_rgmii_set(pp, 1);
2662
2663 /* Cancel Port Reset */
2664 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
2665 val &= ~MVNETA_GMAC2_PORT_RESET;
2666 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
2667
2668 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
2669 MVNETA_GMAC2_PORT_RESET) != 0)
2670 continue;
2671}
2672
2673/* Device initialization routine */
2674static int __devinit mvneta_probe(struct platform_device *pdev)
2675{
2676 const struct mbus_dram_target_info *dram_target_info;
2677 struct device_node *dn = pdev->dev.of_node;
2678 struct device_node *phy_node;
2679 u32 phy_addr;
2680 struct mvneta_port *pp;
2681 struct net_device *dev;
2682 const char *mac_addr;
2683 int phy_mode;
2684 int err;
2685
2686 /* Our multiqueue support is not complete, so for now, only
2687 * allow the usage of the first RX queue
2688 */
2689 if (rxq_def != 0) {
2690 dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
2691 return -EINVAL;
2692 }
2693
2694 dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
2695 if (!dev)
2696 return -ENOMEM;
2697
2698 dev->irq = irq_of_parse_and_map(dn, 0);
2699 if (dev->irq == 0) {
2700 err = -EINVAL;
2701 goto err_free_netdev;
2702 }
2703
2704 phy_node = of_parse_phandle(dn, "phy", 0);
2705 if (!phy_node) {
2706 dev_err(&pdev->dev, "no associated PHY\n");
2707 err = -ENODEV;
2708 goto err_free_irq;
2709 }
2710
2711 phy_mode = of_get_phy_mode(dn);
2712 if (phy_mode < 0) {
2713 dev_err(&pdev->dev, "incorrect phy-mode\n");
2714 err = -EINVAL;
2715 goto err_free_irq;
2716 }
2717
2718 mac_addr = of_get_mac_address(dn);
2719
2720 if (!mac_addr || !is_valid_ether_addr(mac_addr))
2721 eth_hw_addr_random(dev);
2722 else
2723 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
2724
2725 dev->tx_queue_len = MVNETA_MAX_TXD;
2726 dev->watchdog_timeo = 5 * HZ;
2727 dev->netdev_ops = &mvneta_netdev_ops;
2728
2729 SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
2730
2731 pp = netdev_priv(dev);
2732
2733 pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
2734 init_timer(&pp->tx_done_timer);
2735 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
2736
2737 pp->weight = MVNETA_RX_POLL_WEIGHT;
2738 pp->phy_node = phy_node;
2739 pp->phy_interface = phy_mode;
2740
2741 pp->base = of_iomap(dn, 0);
2742 if (pp->base == NULL) {
2743 err = -ENOMEM;
2744 goto err_free_irq;
2745 }
2746
2747 pp->clk = devm_clk_get(&pdev->dev, NULL);
2748 if (IS_ERR(pp->clk)) {
2749 err = PTR_ERR(pp->clk);
2750 goto err_unmap;
2751 }
2752
2753 clk_prepare_enable(pp->clk);
2754
2755 pp->tx_done_timer.data = (unsigned long)dev;
2756
2757 pp->tx_ring_size = MVNETA_MAX_TXD;
2758 pp->rx_ring_size = MVNETA_MAX_RXD;
2759
2760 pp->dev = dev;
2761 SET_NETDEV_DEV(dev, &pdev->dev);
2762
2763 err = mvneta_init(pp, phy_addr);
2764 if (err < 0) {
2765 dev_err(&pdev->dev, "can't init eth hal\n");
2766 goto err_clk;
2767 }
2768 mvneta_port_power_up(pp, phy_mode);
2769
2770 dram_target_info = mv_mbus_dram_info();
2771 if (dram_target_info)
2772 mvneta_conf_mbus_windows(pp, dram_target_info);
2773
2774 netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
2775
2776 err = register_netdev(dev);
2777 if (err < 0) {
2778 dev_err(&pdev->dev, "failed to register\n");
2779 goto err_deinit;
2780 }
2781
2782 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2783 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2784 dev->priv_flags |= IFF_UNICAST_FLT;
2785
2786 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
2787
2788 platform_set_drvdata(pdev, pp->dev);
2789
2790 return 0;
2791
2792err_deinit:
2793 mvneta_deinit(pp);
2794err_clk:
2795 clk_disable_unprepare(pp->clk);
2796err_unmap:
2797 iounmap(pp->base);
2798err_free_irq:
2799 irq_dispose_mapping(dev->irq);
2800err_free_netdev:
2801 free_netdev(dev);
2802 return err;
2803}
2804
2805/* Device removal routine */
2806static int __devexit mvneta_remove(struct platform_device *pdev)
2807{
2808 struct net_device *dev = platform_get_drvdata(pdev);
2809 struct mvneta_port *pp = netdev_priv(dev);
2810
2811 unregister_netdev(dev);
2812 mvneta_deinit(pp);
2813 clk_disable_unprepare(pp->clk);
2814 iounmap(pp->base);
2815 irq_dispose_mapping(dev->irq);
2816 free_netdev(dev);
2817
2818 platform_set_drvdata(pdev, NULL);
2819
2820 return 0;
2821}
2822
2823static const struct of_device_id mvneta_match[] = {
2824 { .compatible = "marvell,armada-370-neta" },
2825 { }
2826};
2827MODULE_DEVICE_TABLE(of, mvneta_match);
2828
2829static struct platform_driver mvneta_driver = {
2830 .probe = mvneta_probe,
2831 .remove = __devexit_p(mvneta_remove),
2832 .driver = {
2833 .name = MVNETA_DRIVER_NAME,
2834 .of_match_table = mvneta_match,
2835 },
2836};
2837
2838module_platform_driver(mvneta_driver);
2839
2840MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
2841MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
2842MODULE_LICENSE("GPL");
2843
2844module_param(rxq_number, int, S_IRUGO);
2845module_param(txq_number, int, S_IRUGO);
2846
2847module_param(rxq_def, int, S_IRUGO);
2848module_param(txq_def, int, S_IRUGO);