author    Marcin Wojtas <mw@semihalf.com>    2014-07-10 15:52:13 -0400
committer David S. Miller <davem@davemloft.net>    2014-07-10 20:18:24 -0400
commit    3f518509dedc99f0b755d2ce68d24f610e3a005a (patch)
tree      dec5c4c2a566e0957f34bec64f5cf2b2a77d141d
parent    b6428817190c5444294e0cc45bd571bfafbbb537 (diff)
ethernet: Add new driver for Marvell Armada 375 network unit
This commit adds a new network driver for the network controller in
Marvell Armada 375 SoC. Given the controller is very different from
the ones in the other Marvell SoCs that use the mv643xx_eth (Kirkwood,
Orion, Discovery) and mvneta (Armada 370/38x/XP) drivers, a new driver
is needed.

Signed-off-by: Marcin Wojtas <mw@semihalf.com>
[Ezequiel: coding style cleanup]
Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  Documentation/devicetree/bindings/net/marvell-pp2.txt  |   61
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig                   |    8
-rw-r--r--  drivers/net/ethernet/marvell/Makefile                  |    1
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c                   | 6393
4 files changed, 6463 insertions, 0 deletions
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
new file mode 100644
index 000000000000..aa4f4230bfd7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
@@ -0,0 +1,61 @@
1* Marvell Armada 375 Ethernet Controller (PPv2)
2
3Required properties:
4
5- compatible: should be "marvell,armada-375-pp2"
6- reg: addresses and length of the register sets for the device.
7 Must contain the following register sets:
8 - common controller registers
9 - LMS registers
10 In addition, at least one port register set is required.
11- clocks: a pointer to the reference clocks for this device, namely:
12 - main controller clock
13 - GOP clock
14- clock-names: names of used clocks, must be "pp_clk" and "gop_clk".
15
16The ethernet ports are represented by subnodes. At least one port is
17required.
18
19Required properties (port):
20
21- interrupts: interrupt for the port
22- port-id: should be '0' or '1' for ethernet ports, and '2' for the
23 loopback port
24- phy-mode: See ethernet.txt file in the same directory
25
26Optional properties (port):
27
28- marvell,loopback: port is in loopback mode
29- phy: a phandle to a phy node defining the PHY address (as the reg
30 property, a single integer). Note: if this property isn't present,
31 then fixed link is assumed, and the 'fixed-link' property is
32 mandatory.
33
34Example:
35
36ethernet@f0000 {
37 compatible = "marvell,armada-375-pp2";
38 reg = <0xf0000 0xa000>,
39 <0xc0000 0x3060>,
40 <0xc4000 0x100>,
41 <0xc5000 0x100>;
42 clocks = <&gateclk 3>, <&gateclk 19>;
43 clock-names = "pp_clk", "gop_clk";
44 status = "okay";
45
46 eth0: eth0@c4000 {
47 interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
48 port-id = <0>;
49 status = "okay";
50 phy = <&phy0>;
51 phy-mode = "gmii";
52 };
53
54 eth1: eth1@c5000 {
55 interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
56 port-id = <1>;
57 status = "okay";
58 phy = <&phy3>;
59 phy-mode = "gmii";
60 };
61};
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 68e6a6613e9a..1b4fc7c639e6 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -54,6 +54,14 @@ config MVNETA
54	  driver, which should be used for the older Marvell SoCs
55	  (Dove, Orion, Discovery, Kirkwood).
56
57config MVPP2
58 tristate "Marvell Armada 375 network interface support"
59 depends on MACH_ARMADA_375
60 select MVMDIO
61 ---help---
62 This driver supports the network interface units in the
63 Marvell ARMADA 375 SoC.
64
65config PXA168_ETH
66	tristate "Marvell pxa168 ethernet support"
67	depends on CPU_PXA168
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 5c4a7765ff0e..f6425bd2884b 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -5,6 +5,7 @@
5obj-$(CONFIG_MVMDIO) += mvmdio.o
6obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
7obj-$(CONFIG_MVNETA) += mvneta.o
8obj-$(CONFIG_MVPP2) += mvpp2.o
9obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
10obj-$(CONFIG_SKGE) += skge.o
11obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
new file mode 100644
index 000000000000..9463ede32e6a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -0,0 +1,6393 @@
1/*
2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Marcin Wojtas <mw@semihalf.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/platform_device.h>
17#include <linux/skbuff.h>
18#include <linux/inetdevice.h>
19#include <linux/mbus.h>
20#include <linux/module.h>
21#include <linux/interrupt.h>
22#include <linux/cpumask.h>
23#include <linux/of.h>
24#include <linux/of_irq.h>
25#include <linux/of_mdio.h>
26#include <linux/of_net.h>
27#include <linux/of_address.h>
28#include <linux/phy.h>
29#include <linux/clk.h>
30#include <uapi/linux/ppp_defs.h>
31#include <net/ip.h>
32#include <net/ipv6.h>
33
34/* RX Fifo Registers */
35#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
36#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
37#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
38#define MVPP2_RX_FIFO_INIT_REG 0x64
39
40/* RX DMA Top Registers */
41#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
42#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
43#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
44#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
45#define MVPP2_POOL_BUF_SIZE_OFFSET 5
46#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
47#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
48#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
49#define MVPP2_RXQ_POOL_SHORT_OFFS 20
50#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
51#define MVPP2_RXQ_POOL_LONG_OFFS 24
52#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
53#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
54#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
55#define MVPP2_RXQ_DISABLE_MASK BIT(31)
56
57/* Parser Registers */
58#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
59#define MVPP2_PRS_PORT_LU_MAX 0xf
60#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
61#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
62#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
63#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
64#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
65#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
66#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
67#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
68#define MVPP2_PRS_TCAM_IDX_REG 0x1100
69#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
70#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
71#define MVPP2_PRS_SRAM_IDX_REG 0x1200
72#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
73#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
74#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
75
76/* Classifier Registers */
77#define MVPP2_CLS_MODE_REG 0x1800
78#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
79#define MVPP2_CLS_PORT_WAY_REG 0x1810
80#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
81#define MVPP2_CLS_LKP_INDEX_REG 0x1814
82#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
83#define MVPP2_CLS_LKP_TBL_REG 0x1818
84#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
85#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
86#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
87#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
88#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
89#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
90#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
91#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
92#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
93#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
94#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
95#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
96
97/* Descriptor Manager Top Registers */
98#define MVPP2_RXQ_NUM_REG 0x2040
99#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
100#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
101#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
102#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
103#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
104#define MVPP2_RXQ_NUM_NEW_OFFSET 16
105#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
106#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
107#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
108#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
109#define MVPP2_RXQ_THRESH_REG 0x204c
110#define MVPP2_OCCUPIED_THRESH_OFFSET 0
111#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
112#define MVPP2_RXQ_INDEX_REG 0x2050
113#define MVPP2_TXQ_NUM_REG 0x2080
114#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
115#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
116#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
117#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
118#define MVPP2_TXQ_THRESH_REG 0x2094
119#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
120#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
121#define MVPP2_TXQ_INDEX_REG 0x2098
122#define MVPP2_TXQ_PREF_BUF_REG 0x209c
123#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
124#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
125#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
126#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
127#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
128#define MVPP2_TXQ_PENDING_REG 0x20a0
129#define MVPP2_TXQ_PENDING_MASK 0x3fff
130#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
131#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
132#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
133#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
134#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
135#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
136#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
137#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
138#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
139#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
140#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
141#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
142#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
143#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
144#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
145#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
146
147/* MBUS bridge registers */
148#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
149#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
150#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
151#define MVPP2_BASE_ADDR_ENABLE 0x4060
152
153/* Interrupt Cause and Mask registers */
154#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
155#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
156#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
157#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
158#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
159#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
160#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
161#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
162#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
163#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
164#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
165#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
166#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
167#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
168#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
169#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
170#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
171#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
172#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
173#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
174
175/* Buffer Manager registers */
176#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
177#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
178#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
179#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
180#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
181#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
182#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
183#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
184#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
185#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
186#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
187#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
188#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
189#define MVPP2_BM_START_MASK BIT(0)
190#define MVPP2_BM_STOP_MASK BIT(1)
191#define MVPP2_BM_STATE_MASK BIT(4)
192#define MVPP2_BM_LOW_THRESH_OFFS 8
193#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
194#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
195 MVPP2_BM_LOW_THRESH_OFFS)
196#define MVPP2_BM_HIGH_THRESH_OFFS 16
197#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
198#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
199 MVPP2_BM_HIGH_THRESH_OFFS)
200#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
201#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
202#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
203#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
204#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
205#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
206#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
207#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
208#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
209#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
210#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
211#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
212#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
213#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
214#define MVPP2_BM_VIRT_RLS_REG 0x64c0
215#define MVPP2_BM_MC_RLS_REG 0x64c4
216#define MVPP2_BM_MC_ID_MASK 0xfff
217#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
218
219/* TX Scheduler registers */
220#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
221#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
222#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
223#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
224#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
225#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
226#define MVPP2_TXP_SCHED_MTU_REG 0x801c
227#define MVPP2_TXP_MTU_MAX 0x7FFFF
228#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
229#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
230#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
231#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
232#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
233#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
234#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
235#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
236#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
237#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
238#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
239#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
240#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
241#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
242
243/* TX general registers */
244#define MVPP2_TX_SNOOP_REG 0x8800
245#define MVPP2_TX_PORT_FLUSH_REG 0x8810
246#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
247
248/* LMS registers */
249#define MVPP2_SRC_ADDR_MIDDLE 0x24
250#define MVPP2_SRC_ADDR_HIGH 0x28
251#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \
252 0x400 + (port) * 0x400)
253#define MVPP2_MIB_LATE_COLLISION 0x7c
254#define MVPP2_ISR_SUM_MASK_REG 0x220c
255#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
256#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
257
258/* Per-port registers */
259#define MVPP2_GMAC_CTRL_0_REG 0x0
260#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
261#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
262#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
263#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
264#define MVPP2_GMAC_CTRL_1_REG 0x4
265#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(0)
266#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
267#define MVPP2_GMAC_PCS_LB_EN_BIT 6
268#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
269#define MVPP2_GMAC_SA_LOW_OFFS 7
270#define MVPP2_GMAC_CTRL_2_REG 0x8
271#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
272#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
273#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
274#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
275#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
276#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
277#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
278#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
279#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
280#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
281#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
282#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
283#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
284#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
285#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
286#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
287 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
288
289#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
290
291/* Descriptor ring Macros */
292#define MVPP2_QUEUE_NEXT_DESC(q, index) \
293 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
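/* For illustration: with a ring of 128 descriptors (last_desc == 127),
 * MVPP2_QUEUE_NEXT_DESC(q, 126) yields 127, while MVPP2_QUEUE_NEXT_DESC(q, 127)
 * wraps back to 0.
 */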
294
295/* Various constants */
296
297/* Coalescing */
298#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
299#define MVPP2_RX_COAL_PKTS 32
300#define MVPP2_RX_COAL_USEC 100
301
302/* The two-byte Marvell header. It either contains a special value used
303 * by Marvell switches when a specific hardware mode is enabled (not
304 * supported by this driver) or is filled with zeroes automatically on
305 * the RX side. Since those two bytes sit at the front of the Ethernet
306 * header, they leave the IP header automatically aligned on a
307 * 4-byte boundary: the hardware skips those two bytes on its
308 * own.
309 */
310#define MVPP2_MH_SIZE 2
311#define MVPP2_ETH_TYPE_LEN 2
312#define MVPP2_PPPOE_HDR_SIZE 8
313#define MVPP2_VLAN_TAG_LEN 4
314
315/* Lbtd 802.3 type */
316#define MVPP2_IP_LBDT_TYPE 0xfffa
317
318#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
319#define MVPP2_TX_CSUM_MAX_SIZE 9800
320
321/* Timeout constants */
322#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
323#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
324
325#define MVPP2_TX_MTU_MAX 0x7ffff
326
327/* Maximum number of T-CONTs of PON port */
328#define MVPP2_MAX_TCONT 16
329
330/* Maximum number of supported ports */
331#define MVPP2_MAX_PORTS 4
332
333/* Maximum number of TXQs used by single port */
334#define MVPP2_MAX_TXQ 8
335
336/* Maximum number of RXQs used by single port */
337#define MVPP2_MAX_RXQ 8
338
339/* Default number of RXQs in use */
340#define MVPP2_DEFAULT_RXQ 4
341
342/* Total number of RXQs available to all ports */
343#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
344
345/* Max number of Rx descriptors */
346#define MVPP2_MAX_RXD 128
347
348/* Max number of Tx descriptors */
349#define MVPP2_MAX_TXD 1024
350
351/* Amount of Tx descriptors that can be reserved at once by CPU */
352#define MVPP2_CPU_DESC_CHUNK 64
353
354/* Max number of Tx descriptors in each aggregated queue */
355#define MVPP2_AGGR_TXQ_SIZE 256
356
357/* Descriptor aligned size */
358#define MVPP2_DESC_ALIGNED_SIZE 32
359
360/* Descriptor alignment mask */
361#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
362
363/* RX FIFO constants */
364#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
365#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
366#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
367
368/* RX buffer constants */
369#define MVPP2_SKB_SHINFO_SIZE \
370 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
371
372#define MVPP2_RX_PKT_SIZE(mtu) \
373 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
374 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
375
376#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
377#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
378#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
379 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
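/* Worked example (illustrative; NET_SKB_PAD and the shared-info size depend
 * on the kernel configuration): for an MTU of 1500 bytes,
 * MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32) = 1536, and the
 * buffer actually allocated is that value plus NET_SKB_PAD plus
 * MVPP2_SKB_SHINFO_SIZE, per the two macros above.
 */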
380
381#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
382
383/* IPv6 max L3 address size */
384#define MVPP2_MAX_L3_ADDR_SIZE 16
385
386/* Port flags */
387#define MVPP2_F_LOOPBACK BIT(0)
388
389/* Marvell tag types */
390enum mvpp2_tag_type {
391 MVPP2_TAG_TYPE_NONE = 0,
392 MVPP2_TAG_TYPE_MH = 1,
393 MVPP2_TAG_TYPE_DSA = 2,
394 MVPP2_TAG_TYPE_EDSA = 3,
395 MVPP2_TAG_TYPE_VLAN = 4,
396 MVPP2_TAG_TYPE_LAST = 5
397};
398
399/* Parser constants */
400#define MVPP2_PRS_TCAM_SRAM_SIZE 256
401#define MVPP2_PRS_TCAM_WORDS 6
402#define MVPP2_PRS_SRAM_WORDS 4
403#define MVPP2_PRS_FLOW_ID_SIZE 64
404#define MVPP2_PRS_FLOW_ID_MASK 0x3f
405#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
406#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
407#define MVPP2_PRS_IPV4_HEAD 0x40
408#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
409#define MVPP2_PRS_IPV4_MC 0xe0
410#define MVPP2_PRS_IPV4_MC_MASK 0xf0
411#define MVPP2_PRS_IPV4_BC_MASK 0xff
412#define MVPP2_PRS_IPV4_IHL 0x5
413#define MVPP2_PRS_IPV4_IHL_MASK 0xf
414#define MVPP2_PRS_IPV6_MC 0xff
415#define MVPP2_PRS_IPV6_MC_MASK 0xff
416#define MVPP2_PRS_IPV6_HOP_MASK 0xff
417#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
418#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
419#define MVPP2_PRS_DBL_VLANS_MAX 100
420
421/* Tcam structure:
422 * - lookup ID - 4 bits
423 * - port ID - 1 byte
424 * - additional information - 1 byte
425 * - header data - 8 bytes
426 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
427 */
428#define MVPP2_PRS_AI_BITS 8
429#define MVPP2_PRS_PORT_MASK 0xff
430#define MVPP2_PRS_LU_MASK 0xf
431#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
432 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
433#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
434 (((offs) * 2) - ((offs) % 2) + 2)
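/* For illustration: header-data offsets 0..3 map to tcam data bytes 0, 1, 4, 5
 * and their enable (mask) bytes to tcam bytes 2, 3, 6, 7, i.e. each 32-bit
 * tcam word carries two data bytes followed by their two enable bytes.
 */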
435#define MVPP2_PRS_TCAM_AI_BYTE 16
436#define MVPP2_PRS_TCAM_PORT_BYTE 17
437#define MVPP2_PRS_TCAM_LU_BYTE 20
438#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
439#define MVPP2_PRS_TCAM_INV_WORD 5
440/* Tcam entries ID */
441#define MVPP2_PE_DROP_ALL 0
442#define MVPP2_PE_FIRST_FREE_TID 1
443#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
444#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
445#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
446#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
447#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
448#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
449#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
450#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
451#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
452#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
453#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
454#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
455#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
456#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
457#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
458#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
459#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
460#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
461#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
462#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
463#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
464#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
465#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
466#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
467#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
468
469/* Sram structure
470 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
471 */
472#define MVPP2_PRS_SRAM_RI_OFFS 0
473#define MVPP2_PRS_SRAM_RI_WORD 0
474#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
475#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
476#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
477#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
478#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
479#define MVPP2_PRS_SRAM_UDF_OFFS 73
480#define MVPP2_PRS_SRAM_UDF_BITS 8
481#define MVPP2_PRS_SRAM_UDF_MASK 0xff
482#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
483#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
484#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
485#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
486#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
487#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
488#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
489#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
490#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
491#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
492#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
493#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
494#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
495#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
496#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
497#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
498#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
499#define MVPP2_PRS_SRAM_AI_OFFS 90
500#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
501#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
502#define MVPP2_PRS_SRAM_AI_MASK 0xff
503#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
504#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
505#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
506#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
507
508/* Sram result info bits assignment */
509#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
510#define MVPP2_PRS_RI_DSA_MASK 0x2
511#define MVPP2_PRS_RI_VLAN_MASK 0xc
512#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3))
513#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
514#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
515#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
516#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
517#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
518#define MVPP2_PRS_RI_L2_CAST_MASK 0x600
519#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10))
520#define MVPP2_PRS_RI_L2_MCAST BIT(9)
521#define MVPP2_PRS_RI_L2_BCAST BIT(10)
522#define MVPP2_PRS_RI_PPPOE_MASK 0x800
523#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000
524#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14))
525#define MVPP2_PRS_RI_L3_IP4 BIT(12)
526#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
527#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
528#define MVPP2_PRS_RI_L3_IP6 BIT(14)
529#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
530#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
531#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000
532#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16))
533#define MVPP2_PRS_RI_L3_MCAST BIT(15)
534#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
535#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
536#define MVPP2_PRS_RI_UDF3_MASK 0x300000
537#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
538#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
539#define MVPP2_PRS_RI_L4_TCP BIT(22)
540#define MVPP2_PRS_RI_L4_UDP BIT(23)
541#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
542#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
543#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
544#define MVPP2_PRS_RI_DROP_MASK 0x80000000
545
546/* Sram additional info bits assignment */
547#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
548#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
549#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
550#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
551#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
552#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
553#define MVPP2_PRS_SINGLE_VLAN_AI 0
554#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
555
556/* DSA/EDSA type */
557#define MVPP2_PRS_TAGGED true
558#define MVPP2_PRS_UNTAGGED false
559#define MVPP2_PRS_EDSA true
560#define MVPP2_PRS_DSA false
561
562/* MAC entries, shadow udf */
563enum mvpp2_prs_udf {
564 MVPP2_PRS_UDF_MAC_DEF,
565 MVPP2_PRS_UDF_MAC_RANGE,
566 MVPP2_PRS_UDF_L2_DEF,
567 MVPP2_PRS_UDF_L2_DEF_COPY,
568 MVPP2_PRS_UDF_L2_USER,
569};
570
571/* Lookup ID */
572enum mvpp2_prs_lookup {
573 MVPP2_PRS_LU_MH,
574 MVPP2_PRS_LU_MAC,
575 MVPP2_PRS_LU_DSA,
576 MVPP2_PRS_LU_VLAN,
577 MVPP2_PRS_LU_L2,
578 MVPP2_PRS_LU_PPPOE,
579 MVPP2_PRS_LU_IP4,
580 MVPP2_PRS_LU_IP6,
581 MVPP2_PRS_LU_FLOWS,
582 MVPP2_PRS_LU_LAST,
583};
584
585/* L3 cast enum */
586enum mvpp2_prs_l3_cast {
587 MVPP2_PRS_L3_UNI_CAST,
588 MVPP2_PRS_L3_MULTI_CAST,
589 MVPP2_PRS_L3_BROAD_CAST
590};
591
592/* Classifier constants */
593#define MVPP2_CLS_FLOWS_TBL_SIZE 512
594#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
595#define MVPP2_CLS_LKP_TBL_SIZE 64
596
597/* BM constants */
598#define MVPP2_BM_POOLS_NUM 8
599#define MVPP2_BM_LONG_BUF_NUM 1024
600#define MVPP2_BM_SHORT_BUF_NUM 2048
601#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
602#define MVPP2_BM_POOL_PTR_ALIGN 128
603#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
604#define MVPP2_BM_SWF_SHORT_POOL 3
605
606/* BM cookie (32 bits) definition */
607#define MVPP2_BM_COOKIE_POOL_OFFS 8
608#define MVPP2_BM_COOKIE_CPU_OFFS 24
609
610/* BM short pool packet size
611 * This value ensures that for SWF the total number
612 * of bytes allocated for each buffer will be 512
613 */
614#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
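/* i.e. MVPP2_RX_MAX_PKT_SIZE(512) = 512 - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE,
 * so feeding this packet size back through MVPP2_RX_BUF_SIZE() and
 * MVPP2_RX_TOTAL_SIZE() gives a 512-byte allocation per short-pool buffer.
 */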
615
616enum mvpp2_bm_type {
617 MVPP2_BM_FREE,
618 MVPP2_BM_SWF_LONG,
619 MVPP2_BM_SWF_SHORT
620};
621
622/* Definitions */
623
624/* Shared Packet Processor resources */
625struct mvpp2 {
626 /* Shared registers' base addresses */
627 void __iomem *base;
628 void __iomem *lms_base;
629
630 /* Common clocks */
631 struct clk *pp_clk;
632 struct clk *gop_clk;
633
634 /* List of pointers to port structures */
635 struct mvpp2_port **port_list;
636
637 /* Aggregated TXQs */
638 struct mvpp2_tx_queue *aggr_txqs;
639
640 /* BM pools */
641 struct mvpp2_bm_pool *bm_pools;
642
643 /* PRS shadow table */
644 struct mvpp2_prs_shadow *prs_shadow;
645 /* PRS auxiliary table for double vlan entries control */
646 bool *prs_double_vlans;
647
648 /* Tclk value */
649 u32 tclk;
650};
651
652struct mvpp2_pcpu_stats {
653 struct u64_stats_sync syncp;
654 u64 rx_packets;
655 u64 rx_bytes;
656 u64 tx_packets;
657 u64 tx_bytes;
658};
659
660struct mvpp2_port {
661 u8 id;
662
663 int irq;
664
665 struct mvpp2 *priv;
666
667 /* Per-port registers' base address */
668 void __iomem *base;
669
670 struct mvpp2_rx_queue **rxqs;
671 struct mvpp2_tx_queue **txqs;
672 struct net_device *dev;
673
674 int pkt_size;
675
676 u32 pending_cause_rx;
677 struct napi_struct napi;
678
679 /* Flags */
680 unsigned long flags;
681
682 u16 tx_ring_size;
683 u16 rx_ring_size;
684 struct mvpp2_pcpu_stats __percpu *stats;
685
686 struct phy_device *phy_dev;
687 phy_interface_t phy_interface;
688 struct device_node *phy_node;
689 unsigned int link;
690 unsigned int duplex;
691 unsigned int speed;
692
693 struct mvpp2_bm_pool *pool_long;
694 struct mvpp2_bm_pool *pool_short;
695
696 /* Index of first port's physical RXQ */
697 u8 first_rxq;
698};
699
700/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
701 * layout of the transmit and reception DMA descriptors, and their
702 * layout is therefore defined by the hardware design
703 */
704
705#define MVPP2_TXD_L3_OFF_SHIFT 0
706#define MVPP2_TXD_IP_HLEN_SHIFT 8
707#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
708#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
709#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
710#define MVPP2_TXD_PADDING_DISABLE BIT(23)
711#define MVPP2_TXD_L4_UDP BIT(24)
712#define MVPP2_TXD_L3_IP6 BIT(26)
713#define MVPP2_TXD_L_DESC BIT(28)
714#define MVPP2_TXD_F_DESC BIT(29)
715
716#define MVPP2_RXD_ERR_SUMMARY BIT(15)
717#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
718#define MVPP2_RXD_ERR_CRC 0x0
719#define MVPP2_RXD_ERR_OVERRUN BIT(13)
720#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
721#define MVPP2_RXD_BM_POOL_ID_OFFS 16
722#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
723#define MVPP2_RXD_HWF_SYNC BIT(21)
724#define MVPP2_RXD_L4_CSUM_OK BIT(22)
725#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
726#define MVPP2_RXD_L4_TCP BIT(25)
727#define MVPP2_RXD_L4_UDP BIT(26)
728#define MVPP2_RXD_L3_IP4 BIT(28)
729#define MVPP2_RXD_L3_IP6 BIT(30)
730#define MVPP2_RXD_BUF_HDR BIT(31)
731
732struct mvpp2_tx_desc {
733 u32 command; /* Options used by HW for packet transmitting.*/
734 u8 packet_offset; /* the offset from the buffer beginning */
735 u8 phys_txq; /* destination queue ID */
736 u16 data_size; /* data size of transmitted packet in bytes */
737 u32 buf_phys_addr; /* physical addr of transmitted buffer */
738 u32 buf_cookie; /* cookie for access to TX buffer in tx path */
739 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
740 u32 reserved2; /* reserved (for future use) */
741};
742
743struct mvpp2_rx_desc {
744 u32 status; /* info about received packet */
745 u16 reserved1; /* parser_info (for future use, PnC) */
746 u16 data_size; /* size of received packet in bytes */
747 u32 buf_phys_addr; /* physical address of the buffer */
748 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
749 u16 reserved2; /* gem_port_id (for future use, PON) */
750 u16 reserved3; /* csum_l4 (for future use, PnC) */
751 u8 reserved4; /* bm_qset (for future use, BM) */
752 u8 reserved5;
753 u16 reserved6; /* classify_info (for future use, PnC) */
754 u32 reserved7; /* flow_id (for future use, PnC) */
755 u32 reserved8;
756};
757
758/* Per-CPU Tx queue control */
759struct mvpp2_txq_pcpu {
760 int cpu;
761
762 /* Number of Tx DMA descriptors in the descriptor ring */
763 int size;
764
765 /* Number of currently used Tx DMA descriptor in the
766 * descriptor ring
767 */
768 int count;
769
770 /* Number of Tx DMA descriptors reserved for each CPU */
771 int reserved_num;
772
773 /* Array of transmitted skb */
774 struct sk_buff **tx_skb;
775
776 /* Index of last TX DMA descriptor that was inserted */
777 int txq_put_index;
778
779 /* Index of the TX DMA descriptor to be cleaned up */
780 int txq_get_index;
781};
782
783struct mvpp2_tx_queue {
784 /* Physical number of this Tx queue */
785 u8 id;
786
787 /* Logical number of this Tx queue */
788 u8 log_id;
789
790 /* Number of Tx DMA descriptors in the descriptor ring */
791 int size;
792
793 /* Number of currently used Tx DMA descriptor in the descriptor ring */
794 int count;
795
796 /* Per-CPU control of physical Tx queues */
797 struct mvpp2_txq_pcpu __percpu *pcpu;
798
799 /* Array of transmitted skb */
800 struct sk_buff **tx_skb;
801
802 u32 done_pkts_coal;
803
804	/* Virtual address of the Tx DMA descriptors array */
805 struct mvpp2_tx_desc *descs;
806
807 /* DMA address of the Tx DMA descriptors array */
808 dma_addr_t descs_phys;
809
810 /* Index of the last Tx DMA descriptor */
811 int last_desc;
812
813 /* Index of the next Tx DMA descriptor to process */
814 int next_desc_to_proc;
815};
816
817struct mvpp2_rx_queue {
818 /* RX queue number, in the range 0-31 for physical RXQs */
819 u8 id;
820
821 /* Num of rx descriptors in the rx descriptor ring */
822 int size;
823
824 u32 pkts_coal;
825 u32 time_coal;
826
827 /* Virtual address of the RX DMA descriptors array */
828 struct mvpp2_rx_desc *descs;
829
830 /* DMA address of the RX DMA descriptors array */
831 dma_addr_t descs_phys;
832
833 /* Index of the last RX DMA descriptor */
834 int last_desc;
835
836 /* Index of the next RX DMA descriptor to process */
837 int next_desc_to_proc;
838
839 /* ID of port to which physical RXQ is mapped */
840 int port;
841
842 /* Port's logic RXQ number to which physical RXQ is mapped */
843 int logic_rxq;
844};
845
846union mvpp2_prs_tcam_entry {
847 u32 word[MVPP2_PRS_TCAM_WORDS];
848 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
849};
850
851union mvpp2_prs_sram_entry {
852 u32 word[MVPP2_PRS_SRAM_WORDS];
853 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
854};
855
856struct mvpp2_prs_entry {
857 u32 index;
858 union mvpp2_prs_tcam_entry tcam;
859 union mvpp2_prs_sram_entry sram;
860};
861
862struct mvpp2_prs_shadow {
863 bool valid;
864 bool finish;
865
866 /* Lookup ID */
867 int lu;
868
869 /* User defined offset */
870 int udf;
871
872 /* Result info */
873 u32 ri;
874 u32 ri_mask;
875};
876
877struct mvpp2_cls_flow_entry {
878 u32 index;
879 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
880};
881
882struct mvpp2_cls_lookup_entry {
883 u32 lkpid;
884 u32 way;
885 u32 data;
886};
887
888struct mvpp2_bm_pool {
889 /* Pool number in the range 0-7 */
890 int id;
891 enum mvpp2_bm_type type;
892
893 /* Buffer Pointers Pool External (BPPE) size */
894 int size;
895 /* Number of buffers for this pool */
896 int buf_num;
897 /* Pool buffer size */
898 int buf_size;
899 /* Packet size */
900 int pkt_size;
901
902 /* BPPE virtual base address */
903 u32 *virt_addr;
904 /* BPPE physical base address */
905 dma_addr_t phys_addr;
906
907 /* Ports using BM pool */
908 u32 port_map;
909
910 /* Occupied buffers indicator */
911 atomic_t in_use;
912 int in_use_thresh;
913
914 spinlock_t lock;
915};
916
917struct mvpp2_buff_hdr {
918 u32 next_buff_phys_addr;
919 u32 next_buff_virt_addr;
920 u16 byte_count;
921 u16 info;
922 u8 reserved1; /* bm_qset (for future use, BM) */
923};
924
925/* Buffer header info bits */
926#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
927#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
928#define MVPP2_B_HDR_INFO_LAST_OFFS 12
929#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
930#define MVPP2_B_HDR_INFO_IS_LAST(info) \
931 ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
932
933/* Static declarations */
934
935/* Number of RXQs used by single port */
936static int rxq_number = MVPP2_DEFAULT_RXQ;
937/* Number of TXQs used by single port */
938static int txq_number = MVPP2_MAX_TXQ;
939
940#define MVPP2_DRIVER_NAME "mvpp2"
941#define MVPP2_DRIVER_VERSION "1.0"
942
943/* Utility/helper methods */
944
945static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
946{
947 writel(data, priv->base + offset);
948}
949
950static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
951{
952 return readl(priv->base + offset);
953}
954
955static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
956{
957 txq_pcpu->txq_get_index++;
958 if (txq_pcpu->txq_get_index == txq_pcpu->size)
959 txq_pcpu->txq_get_index = 0;
960}
961
962static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
963 struct sk_buff *skb)
964{
965 txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
966 txq_pcpu->txq_put_index++;
967 if (txq_pcpu->txq_put_index == txq_pcpu->size)
968 txq_pcpu->txq_put_index = 0;
969}
970
971/* Get number of physical egress port */
972static inline int mvpp2_egress_port(struct mvpp2_port *port)
973{
974 return MVPP2_MAX_TCONT + port->id;
975}
976
977/* Get number of physical TXQ */
978static inline int mvpp2_txq_phys(int port, int txq)
979{
980 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
981}
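/* For example, with MVPP2_MAX_TCONT == 16 and MVPP2_MAX_TXQ == 8, port 0's
 * physical egress port is 16, and its txq 3 maps to physical TXQ
 * (16 + 0) * 8 + 3 = 131.
 */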
982
983/* Parser configuration routines */
984
985/* Update parser tcam and sram hw entries */
986static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
987{
988 int i;
989
990 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
991 return -EINVAL;
992
993 /* Clear entry invalidation bit */
994 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
995
996 /* Write tcam index - indirect access */
997 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
998 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
999 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1000
1001 /* Write sram index - indirect access */
1002 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1003 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1004 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1005
1006 return 0;
1007}
1008
1009/* Read tcam entry from hw */
1010static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1011{
1012 int i;
1013
1014 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1015 return -EINVAL;
1016
1017 /* Write tcam index - indirect access */
1018 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1019
1020 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1021 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1022 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1023 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1024
1025 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1026 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1027
1028 /* Write sram index - indirect access */
1029 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1030 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1031 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1032
1033 return 0;
1034}
1035
1036/* Invalidate tcam hw entry */
1037static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1038{
1039 /* Write index - indirect access */
1040 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1041 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1042 MVPP2_PRS_TCAM_INV_MASK);
1043}
1044
1045/* Enable shadow table entry and set its lookup ID */
1046static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1047{
1048 priv->prs_shadow[index].valid = true;
1049 priv->prs_shadow[index].lu = lu;
1050}
1051
1052/* Update ri fields in shadow table entry */
1053static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1054 unsigned int ri, unsigned int ri_mask)
1055{
1056 priv->prs_shadow[index].ri_mask = ri_mask;
1057 priv->prs_shadow[index].ri = ri;
1058}
1059
1060/* Update lookup field in tcam sw entry */
1061static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1062{
1063 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1064
1065 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1066 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1067}
1068
1069/* Update mask for single port in tcam sw entry */
1070static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1071 unsigned int port, bool add)
1072{
1073 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1074
1075 if (add)
1076 pe->tcam.byte[enable_off] &= ~(1 << port);
1077 else
1078 pe->tcam.byte[enable_off] |= 1 << port;
1079}
1080
1081/* Update port map in tcam sw entry */
1082static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1083 unsigned int ports)
1084{
1085 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1086 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1087
1088 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1089 pe->tcam.byte[enable_off] &= ~port_mask;
1090 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1091}
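/* Note: the port enable byte is effectively active-low - a cleared bit in
 * tcam.byte[enable_off] means the corresponding port participates in the
 * match, which is why the port map is written and read back inverted.
 */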
1092
1093/* Obtain port map from tcam sw entry */
1094static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1095{
1096 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1097
1098 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1099}
1100
1101/* Set byte of data and its enable bits in tcam sw entry */
1102static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1103 unsigned int offs, unsigned char byte,
1104 unsigned char enable)
1105{
1106 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1107 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1108}
1109
1110/* Get byte of data and its enable bits from tcam sw entry */
1111static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1112 unsigned int offs, unsigned char *byte,
1113 unsigned char *enable)
1114{
1115 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1116 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1117}
1118
1119/* Compare tcam data bytes with a pattern */
1120static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1121 u16 data)
1122{
1123 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1124 u16 tcam_data;
1125
1126	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1127 if (tcam_data != data)
1128 return false;
1129 return true;
1130}
1131
1132/* Update ai bits in tcam sw entry */
1133static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1134 unsigned int bits, unsigned int enable)
1135{
1136 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1137
1138 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1139
1140 if (!(enable & BIT(i)))
1141 continue;
1142
1143 if (bits & BIT(i))
1144 pe->tcam.byte[ai_idx] |= 1 << i;
1145 else
1146 pe->tcam.byte[ai_idx] &= ~(1 << i);
1147 }
1148
1149 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1150}
1151
1152/* Get ai bits from tcam sw entry */
1153static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1154{
1155 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1156}
1157
1158/* Set ethertype in tcam sw entry */
1159static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1160 unsigned short ethertype)
1161{
1162 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1163 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1164}
1165
1166/* Set bits in sram sw entry */
1167static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1168 int val)
1169{
1170 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1171}
1172
1173/* Clear bits in sram sw entry */
1174static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1175 int val)
1176{
1177 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1178}
1179
1180/* Update ri bits in sram sw entry */
1181static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1182 unsigned int bits, unsigned int mask)
1183{
1184 unsigned int i;
1185
1186 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1187 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1188
1189 if (!(mask & BIT(i)))
1190 continue;
1191
1192 if (bits & BIT(i))
1193 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1194 else
1195 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1196
1197 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1198 }
1199}
1200
1201/* Obtain ri bits from sram sw entry */
1202static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1203{
1204 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1205}
1206
1207/* Update ai bits in sram sw entry */
1208static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1209 unsigned int bits, unsigned int mask)
1210{
1211 unsigned int i;
1212 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1213
1214 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1215
1216 if (!(mask & BIT(i)))
1217 continue;
1218
1219 if (bits & BIT(i))
1220 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1221 else
1222 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1223
1224 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1225 }
1226}
1227
1228/* Read ai bits from sram sw entry */
1229static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1230{
1231 u8 bits;
1232 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1233 int ai_en_off = ai_off + 1;
1234 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1235
1236 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1237 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1238
1239 return bits;
1240}
1241
1242/* In sram sw entry set lookup ID field of the tcam key to be used in the next
1243 * lookup iteration
1244 */
1245static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1246 unsigned int lu)
1247{
1248 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1249
1250 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1251 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1252 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1253}
1254
1255/* In the sram sw entry set sign and value of the next lookup offset
1256 * and the offset value generated to the classifier
1257 */
1258static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1259 unsigned int op)
1260{
1261 /* Set sign */
1262 if (shift < 0) {
1263 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1264 shift = 0 - shift;
1265 } else {
1266 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1267 }
1268
1269 /* Set value */
1270 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1271 (unsigned char)shift;
1272
1273 /* Reset and set operation */
1274 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1275 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1276 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1277
1278 /* Set base offset as current */
1279 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1280}
1281
1282/* In the sram sw entry set sign and value of the user defined offset
1283 * generated to the classifier
1284 */
1285static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1286 unsigned int type, int offset,
1287 unsigned int op)
1288{
1289 /* Set sign */
1290 if (offset < 0) {
1291 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1292 offset = 0 - offset;
1293 } else {
1294 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1295 }
1296
1297 /* Set value */
1298 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1299 MVPP2_PRS_SRAM_UDF_MASK);
1300 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1301 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1302 MVPP2_PRS_SRAM_UDF_BITS)] &=
1303 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1304 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1305 MVPP2_PRS_SRAM_UDF_BITS)] |=
1306 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1307
1308 /* Set offset type */
1309 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1310 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1311 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1312
1313 /* Set offset operation */
1314 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1315 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1316 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1317
1318 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1319 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1320 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1321 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1322
1323 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1324 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1325 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1326
1327 /* Set base offset as current */
1328 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1329}
1330
1331/* Find parser flow entry */
1332static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1333{
1334 struct mvpp2_prs_entry *pe;
1335 int tid;
1336
1337 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1338 if (!pe)
1339 return NULL;
1340 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1341
1342	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
1343 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1344 u8 bits;
1345
1346 if (!priv->prs_shadow[tid].valid ||
1347 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1348 continue;
1349
1350 pe->index = tid;
1351 mvpp2_prs_hw_read(priv, pe);
1352 bits = mvpp2_prs_sram_ai_get(pe);
1353
1354		/* Sram stores the classification lookup ID in AI bits [5:0] */
1355 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1356 return pe;
1357 }
1358 kfree(pe);
1359
1360 return NULL;
1361}
1362
1363/* Return first free tcam index, seeking from start to end */
1364static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1365 unsigned char end)
1366{
1367 int tid;
1368
1369 if (start > end)
1370 swap(start, end);
1371
1372 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1373 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1374
1375 for (tid = start; tid <= end; tid++) {
1376 if (!priv->prs_shadow[tid].valid)
1377 return tid;
1378 }
1379
1380 return -EINVAL;
1381}
1382
1383/* Enable/disable dropping all mac da's */
1384static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1385{
1386 struct mvpp2_prs_entry pe;
1387
1388 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1389		/* Entry exists - update port only */
1390 pe.index = MVPP2_PE_DROP_ALL;
1391 mvpp2_prs_hw_read(priv, &pe);
1392 } else {
1393 /* Entry doesn't exist - create new */
1394 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1395 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1396 pe.index = MVPP2_PE_DROP_ALL;
1397
1398 /* Non-promiscuous mode for all ports - DROP unknown packets */
1399 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1400 MVPP2_PRS_RI_DROP_MASK);
1401
1402 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1403 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1404
1405 /* Update shadow table */
1406 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1407
1408 /* Mask all ports */
1409 mvpp2_prs_tcam_port_map_set(&pe, 0);
1410 }
1411
1412 /* Update port mask */
1413 mvpp2_prs_tcam_port_set(&pe, port, add);
1414
1415 mvpp2_prs_hw_write(priv, &pe);
1416}
1417
1418/* Set port to promiscuous mode */
1419static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1420{
1421 struct mvpp2_prs_entry pe;
1422
1423	/* Promiscuous mode - accept unknown packets */
1424
1425 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1426		/* Entry exists - update port only */
1427 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1428 mvpp2_prs_hw_read(priv, &pe);
1429 } else {
1430 /* Entry doesn't exist - create new */
1431 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1432 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1433 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1434
1435 /* Continue - set next lookup */
1436 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1437
1438 /* Set result info bits */
1439 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1440 MVPP2_PRS_RI_L2_CAST_MASK);
1441
1442 /* Shift to ethertype */
1443 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1444 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1445
1446 /* Mask all ports */
1447 mvpp2_prs_tcam_port_map_set(&pe, 0);
1448
1449 /* Update shadow table */
1450 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1451 }
1452
1453 /* Update port mask */
1454 mvpp2_prs_tcam_port_set(&pe, port, add);
1455
1456 mvpp2_prs_hw_write(priv, &pe);
1457}
1458
1459/* Accept multicast */
1460static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1461 bool add)
1462{
1463 struct mvpp2_prs_entry pe;
1464 unsigned char da_mc;
1465
1466 /* Ethernet multicast address first byte is
1467 * 0x01 for IPv4 and 0x33 for IPv6
1468 */
1469 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1470
1471 if (priv->prs_shadow[index].valid) {
1472		/* Entry exists - update port only */
1473 pe.index = index;
1474 mvpp2_prs_hw_read(priv, &pe);
1475 } else {
1476 /* Entry doesn't exist - create new */
1477 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1478 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1479 pe.index = index;
1480
1481 /* Continue - set next lookup */
1482 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1483
1484 /* Set result info bits */
1485 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1486 MVPP2_PRS_RI_L2_CAST_MASK);
1487
1488 /* Update tcam entry data first byte */
1489 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1490
1491 /* Shift to ethertype */
1492 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1493 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1494
1495 /* Mask all ports */
1496 mvpp2_prs_tcam_port_map_set(&pe, 0);
1497
1498 /* Update shadow table */
1499 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1500 }
1501
1502 /* Update port mask */
1503 mvpp2_prs_tcam_port_set(&pe, port, add);
1504
1505 mvpp2_prs_hw_write(priv, &pe);
1506}
1507
1508/* Set entry for dsa packets */
1509static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
1510 bool tagged, bool extend)
1511{
1512 struct mvpp2_prs_entry pe;
1513 int tid, shift;
1514
1515 if (extend) {
1516 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
1517 shift = 8;
1518 } else {
1519 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
1520 shift = 4;
1521 }
1522
1523 if (priv->prs_shadow[tid].valid) {
1524		/* Entry exists - update port only */
1525 pe.index = tid;
1526 mvpp2_prs_hw_read(priv, &pe);
1527 } else {
1528 /* Entry doesn't exist - create new */
1529 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1530 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1531 pe.index = tid;
1532
1533		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
1534 mvpp2_prs_sram_shift_set(&pe, shift,
1535 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1536
1537 /* Update shadow table */
1538 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1539
1540 if (tagged) {
1541 /* Set tagged bit in DSA tag */
1542 mvpp2_prs_tcam_data_byte_set(&pe, 0,
1543 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1544 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1545 /* Clear all ai bits for next iteration */
1546 mvpp2_prs_sram_ai_update(&pe, 0,
1547 MVPP2_PRS_SRAM_AI_MASK);
1548 /* If packet is tagged continue check vlans */
1549 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1550 } else {
1551 /* Set result info bits to 'no vlans' */
1552 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1553 MVPP2_PRS_RI_VLAN_MASK);
1554 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1555 }
1556
1557 /* Mask all ports */
1558 mvpp2_prs_tcam_port_map_set(&pe, 0);
1559 }
1560
1561 /* Update port mask */
1562 mvpp2_prs_tcam_port_set(&pe, port, add);
1563
1564 mvpp2_prs_hw_write(priv, &pe);
1565}
1566
1567/* Set entry for dsa ethertype */
1568static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1569 bool add, bool tagged, bool extend)
1570{
1571 struct mvpp2_prs_entry pe;
1572 int tid, shift, port_mask;
1573
1574 if (extend) {
1575 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1576 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1577 port_mask = 0;
1578 shift = 8;
1579 } else {
1580 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1581 MVPP2_PE_ETYPE_DSA_UNTAGGED;
1582 port_mask = MVPP2_PRS_PORT_MASK;
1583 shift = 4;
1584 }
1585
1586 if (priv->prs_shadow[tid].valid) {
1587 /* Entry exists - update port only */
1588 pe.index = tid;
1589 mvpp2_prs_hw_read(priv, &pe);
1590 } else {
1591 /* Entry doesn't exist - create new */
1592 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1593 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1594 pe.index = tid;
1595
1596 /* Set ethertype */
1597 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
1598 mvpp2_prs_match_etype(&pe, 2, 0);
1599
1600 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
1601 MVPP2_PRS_RI_DSA_MASK);
1602 /* Shift ethertype + 2 reserved bytes + tag */
1603 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
1604 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1605
1606 /* Update shadow table */
1607 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1608
1609 if (tagged) {
1610 /* Set tagged bit in DSA tag */
1611 mvpp2_prs_tcam_data_byte_set(&pe,
1612 MVPP2_ETH_TYPE_LEN + 2 + 3,
1613 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1614 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1615 /* Clear all ai bits for next iteration */
1616 mvpp2_prs_sram_ai_update(&pe, 0,
1617 MVPP2_PRS_SRAM_AI_MASK);
1618 /* If packet is tagged continue check vlans */
1619 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1620 } else {
1621 /* Set result info bits to 'no vlans' */
1622 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1623 MVPP2_PRS_RI_VLAN_MASK);
1624 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1625 }
1626 /* Mask/unmask all ports, depending on dsa type */
1627 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
1628 }
1629
1630 /* Update port mask */
1631 mvpp2_prs_tcam_port_set(&pe, port, add);
1632
1633 mvpp2_prs_hw_write(priv, &pe);
1634}
1635
1636/* Search for existing single/triple vlan entry */
1637static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
1638 unsigned short tpid, int ai)
1639{
1640 struct mvpp2_prs_entry *pe;
1641 int tid;
1642
1643 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1644 if (!pe)
1645 return NULL;
1646 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1647
1648 /* Go through all entries with MVPP2_PRS_LU_VLAN */
1649 for (tid = MVPP2_PE_FIRST_FREE_TID;
1650 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1651 unsigned int ri_bits, ai_bits;
1652 bool match;
1653
1654 if (!priv->prs_shadow[tid].valid ||
1655 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1656 continue;
1657
1658 pe->index = tid;
1659
1660 mvpp2_prs_hw_read(priv, pe);
1661 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
1662 if (!match)
1663 continue;
1664
1665 /* Get vlan type */
1666 ri_bits = mvpp2_prs_sram_ri_get(pe);
1667 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1668
1669 /* Get current ai value from tcam */
1670 ai_bits = mvpp2_prs_tcam_ai_get(pe);
1671 /* Clear double vlan bit */
1672 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
1673
1674 if (ai != ai_bits)
1675 continue;
1676
1677 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1678 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1679 return pe;
1680 }
1681 kfree(pe);
1682
1683 return NULL;
1684}
1685
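/* TCAM ordering note: double vlan entries are allocated upwards from the
 * first free TID, while single/triple vlan entries are allocated downwards
 * from the last free TID. The tid/tid_aux checks below keep every double
 * vlan entry at a lower TCAM index than any single/triple entry.
 */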
1686/* Add/update single/triple vlan entry */
1687static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1688 unsigned int port_map)
1689{
1690 struct mvpp2_prs_entry *pe;
1691 int tid_aux, tid;
1692
1693 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
1694
1695 if (!pe) {
1696 /* Create new tcam entry */
1697 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
1698 MVPP2_PE_FIRST_FREE_TID);
1699 if (tid < 0)
1700 return tid;
1701
1702 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1703 if (!pe)
1704 return -ENOMEM;
1705
1706 /* Get last double vlan tid */
1707 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
1708 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
1709 unsigned int ri_bits;
1710
1711 if (!priv->prs_shadow[tid_aux].valid ||
1712 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1713 continue;
1714
1715 pe->index = tid_aux;
1716 mvpp2_prs_hw_read(priv, pe);
1717 ri_bits = mvpp2_prs_sram_ri_get(pe);
1718 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
1719 MVPP2_PRS_RI_VLAN_DOUBLE)
1720 break;
1721 }
1722
1723 if (tid <= tid_aux)
1724 return -EINVAL;
1725
1726 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1727 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1728 pe->index = tid;
1729
1730 mvpp2_prs_match_etype(pe, 0, tpid);
1731
1732 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
1733 /* Shift 4 bytes - skip 1 vlan tag */
1734 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
1735 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1736 /* Clear all ai bits for next iteration */
1737 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1738
1739 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
1740 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
1741 MVPP2_PRS_RI_VLAN_MASK);
1742 } else {
1743 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
1744 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
1745 MVPP2_PRS_RI_VLAN_MASK);
1746 }
1747 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
1748
1749 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
1750 }
1751 /* Update ports' mask */
1752 mvpp2_prs_tcam_port_map_set(pe, port_map);
1753
1754 mvpp2_prs_hw_write(priv, pe);
1755
1756 kfree(pe);
1757
1758 return 0;
1759}
1760
1761/* Get first free double vlan ai number */
1762static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
1763{
1764 int i;
1765
1766 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
1767 if (!priv->prs_double_vlans[i])
1768 return i;
1769 }
1770
1771 return -EINVAL;
1772}
1773
1774/* Search for existing double vlan entry */
1775static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
1776 unsigned short tpid1,
1777 unsigned short tpid2)
1778{
1779 struct mvpp2_prs_entry *pe;
1780 int tid;
1781
1782 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1783 if (!pe)
1784 return NULL;
1785 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1786
1787 /* Go through all entries with MVPP2_PRS_LU_VLAN */
1788 for (tid = MVPP2_PE_FIRST_FREE_TID;
1789 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1790 unsigned int ri_mask;
1791 bool match;
1792
1793 if (!priv->prs_shadow[tid].valid ||
1794 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1795 continue;
1796
1797 pe->index = tid;
1798 mvpp2_prs_hw_read(priv, pe);
1799
1800 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
1801 && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
1802
1803 if (!match)
1804 continue;
1805
1806 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
1807 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
1808 return pe;
1809 }
1810 kfree(pe);
1811
1812 return NULL;
1813}
1814
1815/* Add or update double vlan entry */
1816static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1817 unsigned short tpid2,
1818 unsigned int port_map)
1819{
1820 struct mvpp2_prs_entry *pe;
1821 int tid_aux, tid, ai;
1822
1823 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
1824
1825 if (!pe) {
1826 /* Create new tcam entry */
1827 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1828 MVPP2_PE_LAST_FREE_TID);
1829 if (tid < 0)
1830 return tid;
1831
1832 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1833 if (!pe)
1834 return -ENOMEM;
1835
1836 /* Set ai value for new double vlan entry */
1837 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
1838 if (ai < 0)
1839 return ai;
1840
1841 /* Get first single/triple vlan tid */
1842 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
1843 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
1844 unsigned int ri_bits;
1845
1846 if (!priv->prs_shadow[tid_aux].valid ||
1847 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1848 continue;
1849
1850 pe->index = tid_aux;
1851 mvpp2_prs_hw_read(priv, pe);
1852 ri_bits = mvpp2_prs_sram_ri_get(pe);
1853 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1854 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1855 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1856 break;
1857 }
1858
1859 if (tid >= tid_aux)
1860 return -ERANGE;
1861
1862 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1863 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1864 pe->index = tid;
1865
1866 priv->prs_double_vlans[ai] = true;
1867
1868 mvpp2_prs_match_etype(pe, 0, tpid1);
1869 mvpp2_prs_match_etype(pe, 4, tpid2);
1870
1871 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
1872 /* Shift 8 bytes - skip 2 vlan tags */
1873 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
1874 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1875 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1876 MVPP2_PRS_RI_VLAN_MASK);
1877 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
1878 MVPP2_PRS_SRAM_AI_MASK);
1879
1880 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
1881 }
1882
1883 /* Update ports' mask */
1884 mvpp2_prs_tcam_port_map_set(pe, port_map);
1885 mvpp2_prs_hw_write(priv, pe);
1886
1887 kfree(pe);
1888 return 0;
1889}
1890
1891/* IPv4 header parsing for fragmentation and L4 offset */
1892static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
1893 unsigned int ri, unsigned int ri_mask)
1894{
1895 struct mvpp2_prs_entry pe;
1896 int tid;
1897
1898 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
1899 (proto != IPPROTO_IGMP))
1900 return -EINVAL;
1901
1902 /* Fragmented packet */
1903 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1904 MVPP2_PE_LAST_FREE_TID);
1905 if (tid < 0)
1906 return tid;
1907
1908 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1909 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1910 pe.index = tid;
1911
1912 /* Set next lu to IPv4 */
1913 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1914 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1915 /* Set L4 offset */
1916 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1917 sizeof(struct iphdr) - 4,
1918 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1919 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1920 MVPP2_PRS_IPV4_DIP_AI_BIT);
1921 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
1922 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
1923
1924 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
1925 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1926 /* Unmask all ports */
1927 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1928
1929 /* Update shadow table and hw entry */
1930 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1931 mvpp2_prs_hw_write(priv, &pe);
1932
1933 /* Not fragmented packet */
1934 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1935 MVPP2_PE_LAST_FREE_TID);
1936 if (tid < 0)
1937 return tid;
1938
1939 pe.index = tid;
1940 /* Clear ri before updating */
1941 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1942 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1943 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
1944
1945 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
1946 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
1947
1948 /* Update shadow table and hw entry */
1949 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1950 mvpp2_prs_hw_write(priv, &pe);
1951
1952 return 0;
1953}
1954
1955/* IPv4 L3 multicast or broadcast */
1956static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
1957{
1958 struct mvpp2_prs_entry pe;
1959 int mask, tid;
1960
1961 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1962 MVPP2_PE_LAST_FREE_TID);
1963 if (tid < 0)
1964 return tid;
1965
1966 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1967 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1968 pe.index = tid;
1969
1970 switch (l3_cast) {
1971 case MVPP2_PRS_L3_MULTI_CAST:
1972 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
1973 MVPP2_PRS_IPV4_MC_MASK);
1974 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
1975 MVPP2_PRS_RI_L3_ADDR_MASK);
1976 break;
1977 case MVPP2_PRS_L3_BROAD_CAST:
1978 mask = MVPP2_PRS_IPV4_BC_MASK;
1979 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
1980 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
1981 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
1982 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
1983 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
1984 MVPP2_PRS_RI_L3_ADDR_MASK);
1985 break;
1986 default:
1987 return -EINVAL;
1988 }
1989
1990 /* Finished: go to flowid generation */
1991 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1992 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1993
1994 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1995 MVPP2_PRS_IPV4_DIP_AI_BIT);
1996 /* Unmask all ports */
1997 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1998
1999 /* Update shadow table and hw entry */
2000 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2001 mvpp2_prs_hw_write(priv, &pe);
2002
2003 return 0;
2004}
2005
2006/* Set entries for protocols over IPv6 */
2007static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2008 unsigned int ri, unsigned int ri_mask)
2009{
2010 struct mvpp2_prs_entry pe;
2011 int tid;
2012
2013 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2014 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2015 return -EINVAL;
2016
2017 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2018 MVPP2_PE_LAST_FREE_TID);
2019 if (tid < 0)
2020 return tid;
2021
2022 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2023 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2024 pe.index = tid;
2025
2026 /* Finished: go to flowid generation */
2027 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2028 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2029 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2030 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2031 sizeof(struct ipv6hdr) - 6,
2032 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2033
2034 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2035 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2036 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2037 /* Unmask all ports */
2038 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2039
2040 /* Write HW */
2041 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2042 mvpp2_prs_hw_write(priv, &pe);
2043
2044 return 0;
2045}
2046
2047/* IPv6 L3 multicast entry */
2048static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2049{
2050 struct mvpp2_prs_entry pe;
2051 int tid;
2052
2053 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2054 return -EINVAL;
2055
2056 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2057 MVPP2_PE_LAST_FREE_TID);
2058 if (tid < 0)
2059 return tid;
2060
2061 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2062 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2063 pe.index = tid;
2064
2065 /* Continue - set next lookup to IPv6 */
2066 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2067 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2068 MVPP2_PRS_RI_L3_ADDR_MASK);
2069 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2070 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2071 /* Shift back to IPv6 NH */
2072 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2073
2074 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2075 MVPP2_PRS_IPV6_MC_MASK);
2076 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2077 /* Unmask all ports */
2078 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2079
2080 /* Update shadow table and hw entry */
2081 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2082 mvpp2_prs_hw_write(priv, &pe);
2083
2084 return 0;
2085}
2086
2087/* Parser per-port initialization */
2088static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2089 int lu_max, int offset)
2090{
2091 u32 val;
2092
2093 /* Set lookup ID */
2094 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2095 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2096 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2097 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2098
2099 /* Set maximum number of loops for packet received from port */
2100 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2101 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2102 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2103 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2104
2105 /* Set initial offset for packet header extraction for the first
2106 * searching loop
2107 */
2108 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2109 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2110 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2111 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2112}
2113
2114/* Default flow entries initialization for all ports */
2115static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2116{
2117 struct mvpp2_prs_entry pe;
2118 int port;
2119
2120 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2121 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2122 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2123 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2124
2125 /* Mask all ports */
2126 mvpp2_prs_tcam_port_map_set(&pe, 0);
2127
2128 /* Set flow ID */
2129 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2130 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2131
2132 /* Update shadow table and hw entry */
2133 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2134 mvpp2_prs_hw_write(priv, &pe);
2135 }
2136}
2137
2138/* Set default entry for Marvell Header field */
2139static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2140{
2141 struct mvpp2_prs_entry pe;
2142
2143 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2144
2145 pe.index = MVPP2_PE_MH_DEFAULT;
2146 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2147 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2148 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2149 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2150
2151 /* Unmask all ports */
2152 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2153
2154 /* Update shadow table and hw entry */
2155 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2156 mvpp2_prs_hw_write(priv, &pe);
2157}
2158
2159/* Set default entries (placeholders) for promiscuous, non-promiscuous and
2160 * multicast MAC addresses
2161 */
2162static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2163{
2164 struct mvpp2_prs_entry pe;
2165
2166 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2167
2168 /* Non-promiscuous mode for all ports - DROP unknown packets */
2169 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2170 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2171
2172 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2173 MVPP2_PRS_RI_DROP_MASK);
2174 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2175 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2176
2177 /* Unmask all ports */
2178 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2179
2180 /* Update shadow table and hw entry */
2181 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2182 mvpp2_prs_hw_write(priv, &pe);
2183
2184 /* Placeholders only - no ports */
2185 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2186 mvpp2_prs_mac_promisc_set(priv, 0, false);
2187 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2188 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2189}
2190
2191/* Set default entries for various types of dsa packets */
2192static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2193{
2194 struct mvpp2_prs_entry pe;
2195
2196 /* Untagged EDSA entry - placeholder */
2197 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2198 MVPP2_PRS_EDSA);
2199
2200 /* Tagged EDSA entry - placeholder */
2201 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2202
2203 /* Untagged DSA entry - placeholder */
2204 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2205 MVPP2_PRS_DSA);
2206
2207 /* Tagged DSA entry - placeholder */
2208 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2209
2210 /* Untagged EDSA ethertype entry - placeholder */
2211 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2212 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2213
2214 /* Tagged EDSA ethertype entry - placeholder */
2215 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2216 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2217
2218 /* Untagged DSA ethertype entry */
2219 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2220 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2221
2222 /* Tagged DSA ethertype entry */
2223 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2224 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2225
2226 /* Set default entry, in case the DSA or EDSA tag is not found */
2227 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2228 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2229 pe.index = MVPP2_PE_DSA_DEFAULT;
2230 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2231
2232 /* Shift 0 bytes */
2233 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2234 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2235
2236 /* Clear all sram ai bits for next iteration */
2237 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2238
2239 /* Unmask all ports */
2240 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2241
2242 mvpp2_prs_hw_write(priv, &pe);
2243}
2244
2245/* Match basic ethertypes */
2246static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2247{
2248 struct mvpp2_prs_entry pe;
2249 int tid;
2250
2251 /* Ethertype: PPPoE */
2252 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2253 MVPP2_PE_LAST_FREE_TID);
2254 if (tid < 0)
2255 return tid;
2256
2257 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2258 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2259 pe.index = tid;
2260
2261 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2262
2263 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2264 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2265 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2266 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2267 MVPP2_PRS_RI_PPPOE_MASK);
2268
2269 /* Update shadow table and hw entry */
2270 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2271 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2272 priv->prs_shadow[pe.index].finish = false;
2273 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2274 MVPP2_PRS_RI_PPPOE_MASK);
2275 mvpp2_prs_hw_write(priv, &pe);
2276
2277 /* Ethertype: ARP */
2278 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2279 MVPP2_PE_LAST_FREE_TID);
2280 if (tid < 0)
2281 return tid;
2282
2283 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2284 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2285 pe.index = tid;
2286
2287 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2288
2289 /* Generate flow in the next iteration */
2290 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2291 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2292 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2293 MVPP2_PRS_RI_L3_PROTO_MASK);
2294 /* Set L3 offset */
2295 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2296 MVPP2_ETH_TYPE_LEN,
2297 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2298
2299 /* Update shadow table and hw entry */
2300 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2301 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2302 priv->prs_shadow[pe.index].finish = true;
2303 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2304 MVPP2_PRS_RI_L3_PROTO_MASK);
2305 mvpp2_prs_hw_write(priv, &pe);
2306
2307 /* Ethertype: LBTD */
2308 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2309 MVPP2_PE_LAST_FREE_TID);
2310 if (tid < 0)
2311 return tid;
2312
2313 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2314 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2315 pe.index = tid;
2316
2317 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2318
2319 /* Generate flow in the next iteration */
2320 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2321 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2322 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2323 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2324 MVPP2_PRS_RI_CPU_CODE_MASK |
2325 MVPP2_PRS_RI_UDF3_MASK);
2326 /* Set L3 offset */
2327 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2328 MVPP2_ETH_TYPE_LEN,
2329 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2330
2331 /* Update shadow table and hw entry */
2332 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2333 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2334 priv->prs_shadow[pe.index].finish = true;
2335 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2336 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2337 MVPP2_PRS_RI_CPU_CODE_MASK |
2338 MVPP2_PRS_RI_UDF3_MASK);
2339 mvpp2_prs_hw_write(priv, &pe);
2340
2341 /* Ethertype: IPv4 without options */
2342 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2343 MVPP2_PE_LAST_FREE_TID);
2344 if (tid < 0)
2345 return tid;
2346
2347 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2348 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2349 pe.index = tid;
2350
2351 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2352 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2353 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2354 MVPP2_PRS_IPV4_HEAD_MASK |
2355 MVPP2_PRS_IPV4_IHL_MASK);
2356
2357 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2358 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2359 MVPP2_PRS_RI_L3_PROTO_MASK);
2360 /* Skip eth_type + 4 bytes of IP header */
2361 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2362 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2363 /* Set L3 offset */
2364 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2365 MVPP2_ETH_TYPE_LEN,
2366 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2367
2368 /* Update shadow table and hw entry */
2369 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2370 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2371 priv->prs_shadow[pe.index].finish = false;
2372 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2373 MVPP2_PRS_RI_L3_PROTO_MASK);
2374 mvpp2_prs_hw_write(priv, &pe);
2375
2376 /* Ethertype: IPv4 with options */
2377 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2378 MVPP2_PE_LAST_FREE_TID);
2379 if (tid < 0)
2380 return tid;
2381
2382 pe.index = tid;
2383
2384 /* Clear tcam data before updating */
2385 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2386 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2387
2388 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2389 MVPP2_PRS_IPV4_HEAD,
2390 MVPP2_PRS_IPV4_HEAD_MASK);
2391
2392 /* Clear ri before updating */
2393 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2394 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2395 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2396 MVPP2_PRS_RI_L3_PROTO_MASK);
2397
2398 /* Update shadow table and hw entry */
2399 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2400 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2401 priv->prs_shadow[pe.index].finish = false;
2402 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2403 MVPP2_PRS_RI_L3_PROTO_MASK);
2404 mvpp2_prs_hw_write(priv, &pe);
2405
2406 /* Ethertype: IPv6 without options */
2407 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2408 MVPP2_PE_LAST_FREE_TID);
2409 if (tid < 0)
2410 return tid;
2411
2412 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2413 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2414 pe.index = tid;
2415
2416 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2417
2418 /* Skip DIP of IPv6 header */
2419 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2420 MVPP2_MAX_L3_ADDR_SIZE,
2421 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2422 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2423 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2424 MVPP2_PRS_RI_L3_PROTO_MASK);
2425 /* Set L3 offset */
2426 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2427 MVPP2_ETH_TYPE_LEN,
2428 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2429
2430 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2431 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2432 priv->prs_shadow[pe.index].finish = false;
2433 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2434 MVPP2_PRS_RI_L3_PROTO_MASK);
2435 mvpp2_prs_hw_write(priv, &pe);
2436
2437 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2438 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2439 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2440 pe.index = MVPP2_PE_ETH_TYPE_UN;
2441
2442 /* Unmask all ports */
2443 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2444
2445 /* Generate flow in the next iteration */
2446 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2447 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2448 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2449 MVPP2_PRS_RI_L3_PROTO_MASK);
2450 /* Set L3 offset even if it's an unknown L3 */
2451 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2452 MVPP2_ETH_TYPE_LEN,
2453 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2454
2455 /* Update shadow table and hw entry */
2456 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2457 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2458 priv->prs_shadow[pe.index].finish = true;
2459 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2460 MVPP2_PRS_RI_L3_PROTO_MASK);
2461 mvpp2_prs_hw_write(priv, &pe);
2462
2463 return 0;
2464}
2465
2466/* Configure vlan entries and detect up to 2 successive VLAN tags.
2467 * Possible options:
2468 * 0x8100, 0x88A8
2469 * 0x8100, 0x8100
2470 * 0x8100
2471 * 0x88A8
2472 */
2473static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2474{
2475 struct mvpp2_prs_entry pe;
2476 int err;
2477
2478 priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
2479 					      MVPP2_PRS_DBL_VLANS_MAX,
2480 					      sizeof(bool), GFP_KERNEL);
2481 if (!priv->prs_double_vlans)
2482 return -ENOMEM;
2483
2484 /* Double VLAN: 0x8100, 0x88A8 */
2485 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2486 MVPP2_PRS_PORT_MASK);
2487 if (err)
2488 return err;
2489
2490 /* Double VLAN: 0x8100, 0x8100 */
2491 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2492 MVPP2_PRS_PORT_MASK);
2493 if (err)
2494 return err;
2495
2496 /* Single VLAN: 0x88a8 */
2497 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2498 MVPP2_PRS_PORT_MASK);
2499 if (err)
2500 return err;
2501
2502 /* Single VLAN: 0x8100 */
2503 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2504 MVPP2_PRS_PORT_MASK);
2505 if (err)
2506 return err;
2507
2508 /* Set default double vlan entry */
2509 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2510 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2511 pe.index = MVPP2_PE_VLAN_DBL;
2512
2513 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2514 /* Clear ai for next iterations */
2515 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2516 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2517 MVPP2_PRS_RI_VLAN_MASK);
2518
2519 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2520 MVPP2_PRS_DBL_VLAN_AI_BIT);
2521 /* Unmask all ports */
2522 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2523
2524 /* Update shadow table and hw entry */
2525 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2526 mvpp2_prs_hw_write(priv, &pe);
2527
2528 /* Set default vlan none entry */
2529 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2530 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2531 pe.index = MVPP2_PE_VLAN_NONE;
2532
2533 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2534 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2535 MVPP2_PRS_RI_VLAN_MASK);
2536
2537 /* Unmask all ports */
2538 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2539
2540 /* Update shadow table and hw entry */
2541 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2542 mvpp2_prs_hw_write(priv, &pe);
2543
2544 return 0;
2545}
2546
2547/* Set entries for PPPoE ethertype */
2548static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2549{
2550 struct mvpp2_prs_entry pe;
2551 int tid;
2552
2553 /* IPv4 over PPPoE with options */
2554 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2555 MVPP2_PE_LAST_FREE_TID);
2556 if (tid < 0)
2557 return tid;
2558
2559 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2560 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2561 pe.index = tid;
2562
2563 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2564
2565 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2566 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2567 MVPP2_PRS_RI_L3_PROTO_MASK);
2568 /* Skip eth_type + 4 bytes of IP header */
2569 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2570 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2571 /* Set L3 offset */
2572 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2573 MVPP2_ETH_TYPE_LEN,
2574 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2575
2576 /* Update shadow table and hw entry */
2577 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2578 mvpp2_prs_hw_write(priv, &pe);
2579
2580 /* IPv4 over PPPoE without options */
2581 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2582 MVPP2_PE_LAST_FREE_TID);
2583 if (tid < 0)
2584 return tid;
2585
2586 pe.index = tid;
2587
2588 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2589 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2590 MVPP2_PRS_IPV4_HEAD_MASK |
2591 MVPP2_PRS_IPV4_IHL_MASK);
2592
2593 /* Clear ri before updating */
2594 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2595 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2596 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2597 MVPP2_PRS_RI_L3_PROTO_MASK);
2598
2599 /* Update shadow table and hw entry */
2600 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2601 mvpp2_prs_hw_write(priv, &pe);
2602
2603 /* IPv6 over PPPoE */
2604 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2605 MVPP2_PE_LAST_FREE_TID);
2606 if (tid < 0)
2607 return tid;
2608
2609 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2610 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2611 pe.index = tid;
2612
2613 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2614
2615 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2616 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2617 MVPP2_PRS_RI_L3_PROTO_MASK);
2618 /* Skip eth_type + 4 bytes of IPv6 header */
2619 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2620 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2621 /* Set L3 offset */
2622 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2623 MVPP2_ETH_TYPE_LEN,
2624 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2625
2626 /* Update shadow table and hw entry */
2627 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2628 mvpp2_prs_hw_write(priv, &pe);
2629
2630 /* Non-IP over PPPoE */
2631 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2632 MVPP2_PE_LAST_FREE_TID);
2633 if (tid < 0)
2634 return tid;
2635
2636 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2637 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2638 pe.index = tid;
2639
2640 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2641 MVPP2_PRS_RI_L3_PROTO_MASK);
2642
2643 /* Finished: go to flowid generation */
2644 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2645 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2646 /* Set L3 offset even if it's unknown L3 */
2647 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2648 MVPP2_ETH_TYPE_LEN,
2649 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2650
2651 /* Update shadow table and hw entry */
2652 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2653 mvpp2_prs_hw_write(priv, &pe);
2654
2655 return 0;
2656}
2657
2658/* Initialize entries for IPv4 */
2659static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2660{
2661 struct mvpp2_prs_entry pe;
2662 int err;
2663
2664 /* Set entries for TCP, UDP and IGMP over IPv4 */
2665 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
2666 MVPP2_PRS_RI_L4_PROTO_MASK);
2667 if (err)
2668 return err;
2669
2670 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
2671 MVPP2_PRS_RI_L4_PROTO_MASK);
2672 if (err)
2673 return err;
2674
2675 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
2676 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2677 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2678 MVPP2_PRS_RI_CPU_CODE_MASK |
2679 MVPP2_PRS_RI_UDF3_MASK);
2680 if (err)
2681 return err;
2682
2683 /* IPv4 Broadcast */
2684 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
2685 if (err)
2686 return err;
2687
2688 /* IPv4 Multicast */
2689 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2690 if (err)
2691 return err;
2692
2693 /* Default IPv4 entry for unknown protocols */
2694 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2695 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2696 pe.index = MVPP2_PE_IP4_PROTO_UN;
2697
2698 /* Set next lu to IPv4 */
2699 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2700 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2701 /* Set L4 offset */
2702 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2703 sizeof(struct iphdr) - 4,
2704 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2705 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2706 MVPP2_PRS_IPV4_DIP_AI_BIT);
2707 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2708 MVPP2_PRS_RI_L4_PROTO_MASK);
2709
2710 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2711 /* Unmask all ports */
2712 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2713
2714 /* Update shadow table and hw entry */
2715 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2716 mvpp2_prs_hw_write(priv, &pe);
2717
2718 /* Default IPv4 entry for unicast address */
2719 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2720 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2721 pe.index = MVPP2_PE_IP4_ADDR_UN;
2722
2723 /* Finished: go to flowid generation */
2724 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2725 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2726 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2727 MVPP2_PRS_RI_L3_ADDR_MASK);
2728
2729 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2730 MVPP2_PRS_IPV4_DIP_AI_BIT);
2731 /* Unmask all ports */
2732 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2733
2734 /* Update shadow table and hw entry */
2735 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2736 mvpp2_prs_hw_write(priv, &pe);
2737
2738 return 0;
2739}
2740
2741/* Initialize entries for IPv6 */
2742static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
2743{
2744 struct mvpp2_prs_entry pe;
2745 int tid, err;
2746
2747 /* Set entries for TCP, UDP and ICMP over IPv6 */
2748 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
2749 MVPP2_PRS_RI_L4_TCP,
2750 MVPP2_PRS_RI_L4_PROTO_MASK);
2751 if (err)
2752 return err;
2753
2754 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
2755 MVPP2_PRS_RI_L4_UDP,
2756 MVPP2_PRS_RI_L4_PROTO_MASK);
2757 if (err)
2758 return err;
2759
2760 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
2761 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2762 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2763 MVPP2_PRS_RI_CPU_CODE_MASK |
2764 MVPP2_PRS_RI_UDF3_MASK);
2765 if (err)
2766 return err;
2767
2768 /* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */
2769 /* Result Info: UDF7=1, DS lite */
2770 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
2771 MVPP2_PRS_RI_UDF7_IP6_LITE,
2772 MVPP2_PRS_RI_UDF7_MASK);
2773 if (err)
2774 return err;
2775
2776 /* IPv6 multicast */
2777 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2778 if (err)
2779 return err;
2780
2781 /* Entry for checking hop limit */
2782 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2783 MVPP2_PE_LAST_FREE_TID);
2784 if (tid < 0)
2785 return tid;
2786
2787 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2788 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2789 pe.index = tid;
2790
2791 /* Finished: go to flowid generation */
2792 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2793 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2794 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
2795 MVPP2_PRS_RI_DROP_MASK,
2796 MVPP2_PRS_RI_L3_PROTO_MASK |
2797 MVPP2_PRS_RI_DROP_MASK);
2798
2799 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
2800 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2801 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2802
2803 /* Update shadow table and hw entry */
2804 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2805 mvpp2_prs_hw_write(priv, &pe);
2806
2807 /* Default IPv6 entry for unknown protocols */
2808 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2809 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2810 pe.index = MVPP2_PE_IP6_PROTO_UN;
2811
2812 /* Finished: go to flowid generation */
2813 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2814 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2815 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2816 MVPP2_PRS_RI_L4_PROTO_MASK);
2817 /* Set L4 offset relative to our current place */
2818 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2819 sizeof(struct ipv6hdr) - 4,
2820 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2821
2822 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2823 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2824 /* Unmask all ports */
2825 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2826
2827 /* Update shadow table and hw entry */
2828 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2829 mvpp2_prs_hw_write(priv, &pe);
2830
2831 /* Default IPv6 entry for unknown ext protocols */
2832 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2833 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2834 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
2835
2836 /* Finished: go to flowid generation */
2837 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2838 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2839 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2840 MVPP2_PRS_RI_L4_PROTO_MASK);
2841
2842 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
2843 MVPP2_PRS_IPV6_EXT_AI_BIT);
2844 /* Unmask all ports */
2845 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2846
2847 /* Update shadow table and hw entry */
2848 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2849 mvpp2_prs_hw_write(priv, &pe);
2850
2851 /* Default IPv6 entry for unicast address */
2852 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2853 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2854 pe.index = MVPP2_PE_IP6_ADDR_UN;
2855
2856 /* Continue - go to IPv6 again */
2857 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2858 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2859 MVPP2_PRS_RI_L3_ADDR_MASK);
2860 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2861 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2862 /* Shift back to IPv6 NH */
2863 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2864
2865 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2866 /* Unmask all ports */
2867 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2868
2869 /* Update shadow table and hw entry */
2870 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2871 mvpp2_prs_hw_write(priv, &pe);
2872
2873 return 0;
2874}
2875
2876/* Parser default initialization */
2877static int mvpp2_prs_default_init(struct platform_device *pdev,
2878 struct mvpp2 *priv)
2879{
2880 int err, index, i;
2881
2882 /* Enable tcam table */
2883 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2884
2885 /* Clear all tcam and sram entries */
2886 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2887 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2888 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2889 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2890
2891 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2892 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2893 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2894 }
2895
2896 /* Invalidate all tcam entries */
2897 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2898 mvpp2_prs_hw_inv(priv, index);
2899
2900 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2901 sizeof(struct mvpp2_prs_shadow),
2902 GFP_KERNEL);
2903 if (!priv->prs_shadow)
2904 return -ENOMEM;
2905
2906 /* Always start from lookup = 0 */
2907 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2908 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2909 MVPP2_PRS_PORT_LU_MAX, 0);
2910
2911 mvpp2_prs_def_flow_init(priv);
2912
2913 mvpp2_prs_mh_init(priv);
2914
2915 mvpp2_prs_mac_init(priv);
2916
2917 mvpp2_prs_dsa_init(priv);
2918
2919 err = mvpp2_prs_etype_init(priv);
2920 if (err)
2921 return err;
2922
2923 err = mvpp2_prs_vlan_init(pdev, priv);
2924 if (err)
2925 return err;
2926
2927 err = mvpp2_prs_pppoe_init(priv);
2928 if (err)
2929 return err;
2930
2931 err = mvpp2_prs_ip6_init(priv);
2932 if (err)
2933 return err;
2934
2935 err = mvpp2_prs_ip4_init(priv);
2936 if (err)
2937 return err;
2938
2939 return 0;
2940}
2941
2942/* Compare MAC DA with tcam entry data */
2943static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2944 const u8 *da, unsigned char *mask)
2945{
2946 unsigned char tcam_byte, tcam_mask;
2947 int index;
2948
2949 for (index = 0; index < ETH_ALEN; index++) {
2950 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2951 if (tcam_mask != mask[index])
2952 return false;
2953
2954 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2955 return false;
2956 }
2957
2958 return true;
2959}
2960
2961/* Find tcam entry with matched pair <MAC DA, port> */
2962static struct mvpp2_prs_entry *
2963mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2964 unsigned char *mask, int udf_type)
2965{
2966 struct mvpp2_prs_entry *pe;
2967 int tid;
2968
2969 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2970 if (!pe)
2971 return NULL;
2972 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2973
2974 /* Go through all entries with MVPP2_PRS_LU_MAC */
2975 for (tid = MVPP2_PE_FIRST_FREE_TID;
2976 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2977 unsigned int entry_pmap;
2978
2979 if (!priv->prs_shadow[tid].valid ||
2980 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2981 (priv->prs_shadow[tid].udf != udf_type))
2982 continue;
2983
2984 pe->index = tid;
2985 mvpp2_prs_hw_read(priv, pe);
2986 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
2987
2988 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
2989 entry_pmap == pmap)
2990 return pe;
2991 }
2992 kfree(pe);
2993
2994 return NULL;
2995}
2996
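/* Per-address DA entries created below are always allocated at a TCAM
 * index lower than the first MVPP2_PRS_UDF_MAC_RANGE entry, and an entry
 * whose port map becomes empty is invalidated instead of being kept.
 */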
2997/* Update parser's mac da entry */
2998static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
2999 const u8 *da, bool add)
3000{
3001 struct mvpp2_prs_entry *pe;
3002 unsigned int pmap, len, ri;
3003 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3004 int tid;
3005
3006 /* Scan TCAM and see if entry with this <MAC DA, port> already exists */
3007 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3008 MVPP2_PRS_UDF_MAC_DEF);
3009
3010 /* No such entry */
3011 if (!pe) {
3012 if (!add)
3013 return 0;
3014
3015 /* Create new TCAM entry */
3016 /* Find first range mac entry */
3017 for (tid = MVPP2_PE_FIRST_FREE_TID;
3018 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3019 if (priv->prs_shadow[tid].valid &&
3020 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3021 (priv->prs_shadow[tid].udf ==
3022 MVPP2_PRS_UDF_MAC_RANGE))
3023 break;
3024
3025 /* Go through all entries from first to last */
3026 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3027 tid - 1);
3028 if (tid < 0)
3029 return tid;
3030
3031 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3032 if (!pe)
3033 return -ENOMEM;
3034 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3035 pe->index = tid;
3036
3037 /* Mask all ports */
3038 mvpp2_prs_tcam_port_map_set(pe, 0);
3039 }
3040
3041 /* Update port mask */
3042 mvpp2_prs_tcam_port_set(pe, port, add);
3043
3044 /* Invalidate the entry if no ports are left enabled */
3045 pmap = mvpp2_prs_tcam_port_map_get(pe);
3046 if (pmap == 0) {
3047 if (add) {
3048 kfree(pe);
3049 return -1;
3050 }
3051 mvpp2_prs_hw_inv(priv, pe->index);
3052 priv->prs_shadow[pe->index].valid = false;
3053 kfree(pe);
3054 return 0;
3055 }
3056
3057 /* Continue - set next lookup */
3058 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3059
3060 /* Set match on DA */
3061 len = ETH_ALEN;
3062 while (len--)
3063 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3064
3065 /* Set result info bits */
3066 if (is_broadcast_ether_addr(da))
3067 ri = MVPP2_PRS_RI_L2_BCAST;
3068 else if (is_multicast_ether_addr(da))
3069 ri = MVPP2_PRS_RI_L2_MCAST;
3070 else
3071 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3072
3073 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3074 MVPP2_PRS_RI_MAC_ME_MASK);
3075 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3076 MVPP2_PRS_RI_MAC_ME_MASK);
3077
3078 /* Shift to ethertype */
3079 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3080 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3081
3082 /* Update shadow table and hw entry */
3083 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3084 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3085 mvpp2_prs_hw_write(priv, pe);
3086
3087 kfree(pe);
3088
3089 return 0;
3090}
3091
3092static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3093{
3094 struct mvpp2_port *port = netdev_priv(dev);
3095 int err;
3096
3097 /* Remove old parser entry */
3098 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3099 false);
3100 if (err)
3101 return err;
3102
3103 /* Add new parser entry */
3104 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3105 if (err)
3106 return err;
3107
3108 /* Set addr in the device */
3109 ether_addr_copy(dev->dev_addr, da);
3110
3111 return 0;
3112}
3113
3114/* Delete all the port's simple (non-range) multicast entries */
3115static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3116{
3117 struct mvpp2_prs_entry pe;
3118 int index, tid;
3119
3120 for (tid = MVPP2_PE_FIRST_FREE_TID;
3121 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3122 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3123
3124 if (!priv->prs_shadow[tid].valid ||
3125 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3126 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3127 continue;
3128
3129 /* Only simple mac entries */
3130 pe.index = tid;
3131 mvpp2_prs_hw_read(priv, &pe);
3132
3133 /* Read mac addr from entry */
3134 for (index = 0; index < ETH_ALEN; index++)
3135 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3136 &da_mask[index]);
3137
3138 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3139 /* Delete this entry */
3140 mvpp2_prs_mac_da_accept(priv, port, da, false);
3141 }
3142}
3143
3144static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3145{
3146 switch (type) {
3147 case MVPP2_TAG_TYPE_EDSA:
3148 /* Add port to EDSA entries */
3149 mvpp2_prs_dsa_tag_set(priv, port, true,
3150 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3151 mvpp2_prs_dsa_tag_set(priv, port, true,
3152 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3153 /* Remove port from DSA entries */
3154 mvpp2_prs_dsa_tag_set(priv, port, false,
3155 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3156 mvpp2_prs_dsa_tag_set(priv, port, false,
3157 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3158 break;
3159
3160 case MVPP2_TAG_TYPE_DSA:
3161 /* Add port to DSA entries */
3162 mvpp2_prs_dsa_tag_set(priv, port, true,
3163 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3164 mvpp2_prs_dsa_tag_set(priv, port, true,
3165 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3166 /* Remove port from EDSA entries */
3167 mvpp2_prs_dsa_tag_set(priv, port, false,
3168 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3169 mvpp2_prs_dsa_tag_set(priv, port, false,
3170 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3171 break;
3172
3173 case MVPP2_TAG_TYPE_MH:
3174 case MVPP2_TAG_TYPE_NONE:
3175 /* Remove port from EDSA and DSA entries */
3176 mvpp2_prs_dsa_tag_set(priv, port, false,
3177 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3178 mvpp2_prs_dsa_tag_set(priv, port, false,
3179 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3180 mvpp2_prs_dsa_tag_set(priv, port, false,
3181 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3182 mvpp2_prs_dsa_tag_set(priv, port, false,
3183 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3184 break;
3185
3186 default:
3187 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3188 return -EINVAL;
3189 }
3190
3191 return 0;
3192}
3193
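/* mvpp2_prs_def_flow_init() installs one flow entry per port with all
 * ports masked; the helper below enables that entry for a given port,
 * creating a new one if the lookup does not find it.
 */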
3194/* Set prs flow for the port */
3195static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3196{
3197 struct mvpp2_prs_entry *pe;
3198 int tid;
3199
3200 pe = mvpp2_prs_flow_find(port->priv, port->id);
3201
3202 /* Such an entry doesn't exist */
3203 if (!pe) {
3204 /* Go through all entries from last to first */
3205 tid = mvpp2_prs_tcam_first_free(port->priv,
3206 MVPP2_PE_LAST_FREE_TID,
3207 MVPP2_PE_FIRST_FREE_TID);
3208 if (tid < 0)
3209 return tid;
3210
3211 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3212 if (!pe)
3213 return -ENOMEM;
3214
3215 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3216 pe->index = tid;
3217
3218 /* Set flow ID */
3219 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3220 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3221
3222 /* Update shadow table */
3223 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3224 }
3225
3226 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3227 mvpp2_prs_hw_write(port->priv, pe);
3228 kfree(pe);
3229
3230 return 0;
3231}
3232
3233/* Classifier configuration routines */
3234
3235/* Update classification flow table registers */
3236static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3237 struct mvpp2_cls_flow_entry *fe)
3238{
3239 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3240 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3241 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3242 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3243}
3244
3245/* Update classification lookup table register */
3246static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3247 struct mvpp2_cls_lookup_entry *le)
3248{
3249 u32 val;
3250
3251 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3252 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3253 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3254}
3255
3256/* Classifier default initialization */
3257static void mvpp2_cls_init(struct mvpp2 *priv)
3258{
3259 struct mvpp2_cls_lookup_entry le;
3260 struct mvpp2_cls_flow_entry fe;
3261 int index;
3262
3263 /* Enable classifier */
3264 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3265
3266 /* Clear classifier flow table */
3267 memset(&fe.data, 0, sizeof(fe.data));
3268 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3269 fe.index = index;
3270 mvpp2_cls_flow_write(priv, &fe);
3271 }
3272
3273 /* Clear classifier lookup table */
3274 le.data = 0;
3275 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3276 le.lkpid = index;
3277 le.way = 0;
3278 mvpp2_cls_lookup_write(priv, &le);
3279
3280 le.way = 1;
3281 mvpp2_cls_lookup_write(priv, &le);
3282 }
3283}
3284
3285static void mvpp2_cls_port_config(struct mvpp2_port *port)
3286{
3287 struct mvpp2_cls_lookup_entry le;
3288 u32 val;
3289
3290 /* Set way for the port */
3291 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3292 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3293 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3294
3295 /* Pick the entry to be accessed in lookup ID decoding table
3296 * according to the way and lkpid.
3297 */
3298 le.lkpid = port->id;
3299 le.way = 0;
3300 le.data = 0;
3301
3302 /* Set initial CPU queue for receiving packets */
3303 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3304 le.data |= port->first_rxq;
3305
3306 /* Disable classification engines */
3307 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3308
3309 /* Update lookup ID table entry */
3310 mvpp2_cls_lookup_write(port->priv, &le);
3311}
3312
3313/* Set CPU queue number for oversize packets */
3314static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3315{
3316 u32 val;
3317
3318 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3319 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3320
3321 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3322 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3323
3324 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3325 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3326 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3327}
3328
3329/* Buffer Manager configuration routines */
3330
3331/* Create pool */
3332static int mvpp2_bm_pool_create(struct platform_device *pdev,
3333 struct mvpp2 *priv,
3334 struct mvpp2_bm_pool *bm_pool, int size)
3335{
3336 int size_bytes;
3337 u32 val;
3338
3339 size_bytes = sizeof(u32) * size;
3340 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
3341 &bm_pool->phys_addr,
3342 GFP_KERNEL);
3343 if (!bm_pool->virt_addr)
3344 return -ENOMEM;
3345
3346 if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
3347 dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
3348 bm_pool->phys_addr);
3349 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3350 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3351 return -ENOMEM;
3352 }
3353
3354 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3355 bm_pool->phys_addr);
3356 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3357
3358 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3359 val |= MVPP2_BM_START_MASK;
3360 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3361
3362 bm_pool->type = MVPP2_BM_FREE;
3363 bm_pool->size = size;
3364 bm_pool->pkt_size = 0;
3365 bm_pool->buf_num = 0;
3366 atomic_set(&bm_pool->in_use, 0);
3367 spin_lock_init(&bm_pool->lock);
3368
3369 return 0;
3370}
3371
3372/* Set pool buffer size */
3373static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3374 struct mvpp2_bm_pool *bm_pool,
3375 int buf_size)
3376{
3377 u32 val;
3378
3379 bm_pool->buf_size = buf_size;
3380
3381 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3382 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3383}
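/* Illustration (assuming MVPP2_POOL_BUF_SIZE_OFFSET is 5, i.e. 32-byte
 * granularity): a requested buf_size of 1700 is programmed into the
 * register as ALIGN(1700, 32) = 1728.
 */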
3384
3385/* Free "num" buffers from the pool */
3386static int mvpp2_bm_bufs_free(struct mvpp2 *priv,
3387 struct mvpp2_bm_pool *bm_pool, int num)
3388{
3389 int i;
3390
3391 if (num >= bm_pool->buf_num)
3392 /* Free all buffers from the pool */
3393 num = bm_pool->buf_num;
3394
3395 for (i = 0; i < num; i++) {
3396 u32 vaddr;
3397
3398		/* Get buffer virtual address (indirect access) */
3399 mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3400 vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
3401 if (!vaddr)
3402 break;
3403 dev_kfree_skb_any((struct sk_buff *)vaddr);
3404 }
3405
3406 /* Update BM driver with number of buffers removed from pool */
3407 bm_pool->buf_num -= i;
3408 return i;
3409}
3410
3411/* Cleanup pool */
3412static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3413 struct mvpp2 *priv,
3414 struct mvpp2_bm_pool *bm_pool)
3415{
3416 int num;
3417 u32 val;
3418
3419 num = mvpp2_bm_bufs_free(priv, bm_pool, bm_pool->buf_num);
3420 if (num != bm_pool->buf_num) {
3421 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3422 return 0;
3423 }
3424
3425 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3426 val |= MVPP2_BM_STOP_MASK;
3427 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3428
3429 dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
3430 bm_pool->virt_addr,
3431 bm_pool->phys_addr);
3432 return 0;
3433}
3434
3435static int mvpp2_bm_pools_init(struct platform_device *pdev,
3436 struct mvpp2 *priv)
3437{
3438 int i, err, size;
3439 struct mvpp2_bm_pool *bm_pool;
3440
3441 /* Create all pools with maximum size */
3442 size = MVPP2_BM_POOL_SIZE_MAX;
3443 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3444 bm_pool = &priv->bm_pools[i];
3445 bm_pool->id = i;
3446 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3447 if (err)
3448 goto err_unroll_pools;
3449 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3450 }
3451 return 0;
3452
3453err_unroll_pools:
3454 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3455 for (i = i - 1; i >= 0; i--)
3456 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3457 return err;
3458}
3459
3460static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3461{
3462 int i, err;
3463
3464 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3465 /* Mask BM all interrupts */
3466 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3467 /* Clear BM cause register */
3468 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3469 }
3470
3471 /* Allocate and initialize BM pools */
3472 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3473 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
3474 if (!priv->bm_pools)
3475 return -ENOMEM;
3476
3477 err = mvpp2_bm_pools_init(pdev, priv);
3478 if (err < 0)
3479 return err;
3480 return 0;
3481}
3482
3483/* Attach long pool to rxq */
3484static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3485 int lrxq, int long_pool)
3486{
3487 u32 val;
3488 int prxq;
3489
3490 /* Get queue physical ID */
3491 prxq = port->rxqs[lrxq]->id;
3492
3493 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3494 val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3495 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
3496 MVPP2_RXQ_POOL_LONG_MASK);
3497
3498 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3499}
3500
3501/* Attach short pool to rxq */
3502static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3503 int lrxq, int short_pool)
3504{
3505 u32 val;
3506 int prxq;
3507
3508 /* Get queue physical ID */
3509 prxq = port->rxqs[lrxq]->id;
3510
3511 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3512 val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3513 val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
3514 MVPP2_RXQ_POOL_SHORT_MASK);
3515
3516 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3517}
3518
3519/* Allocate skb for BM pool */
3520static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
3521 struct mvpp2_bm_pool *bm_pool,
3522 dma_addr_t *buf_phys_addr,
3523 gfp_t gfp_mask)
3524{
3525 struct sk_buff *skb;
3526 dma_addr_t phys_addr;
3527
3528 skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
3529 if (!skb)
3530 return NULL;
3531
3532 phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
3533 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3534 DMA_FROM_DEVICE);
3535 if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
3536 dev_kfree_skb_any(skb);
3537 return NULL;
3538 }
3539 *buf_phys_addr = phys_addr;
3540
3541 return skb;
3542}
3543
3544/* Set pool number in a BM cookie */
3545static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3546{
3547 u32 bm;
3548
3549 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3550 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3551
3552 return bm;
3553}
3554
3555/* Get pool number from a BM cookie */
3556static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
3557{
3558 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
3559}
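/* Sketch of the cookie layout these helpers assume (pool byte at
 * MVPP2_BM_COOKIE_POOL_OFFS, CPU byte at MVPP2_BM_COOKIE_CPU_OFFS): with
 * a pool offset of 8, mvpp2_bm_cookie_pool_set(0, 3) returns 0x300 and
 * mvpp2_bm_cookie_pool_get(0x300) returns 3; the CPU byte is left
 * untouched by both helpers.
 */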
3560
3561/* Release buffer to BM */
3562static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3563 u32 buf_phys_addr, u32 buf_virt_addr)
3564{
3565 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
3566 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
3567}
3568
3569/* Release multicast buffer */
3570static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
3571 u32 buf_phys_addr, u32 buf_virt_addr,
3572 int mc_id)
3573{
3574 u32 val = 0;
3575
3576 val |= (mc_id & MVPP2_BM_MC_ID_MASK);
3577 mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
3578
3579 mvpp2_bm_pool_put(port, pool,
3580 buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
3581 buf_virt_addr);
3582}
3583
3584/* Refill BM pool */
3585static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
3586 u32 phys_addr, u32 cookie)
3587{
3588 int pool = mvpp2_bm_cookie_pool_get(bm);
3589
3590 mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
3591}
3592
3593/* Allocate buffers for the pool */
3594static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3595 struct mvpp2_bm_pool *bm_pool, int buf_num)
3596{
3597 struct sk_buff *skb;
3598 int i, buf_size, total_size;
3599 u32 bm;
3600 dma_addr_t phys_addr;
3601
3602 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3603 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3604
3605 if (buf_num < 0 ||
3606 (buf_num + bm_pool->buf_num > bm_pool->size)) {
3607 netdev_err(port->dev,
3608 "cannot allocate %d buffers for pool %d\n",
3609 buf_num, bm_pool->id);
3610 return 0;
3611 }
3612
3613 bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
3614 for (i = 0; i < buf_num; i++) {
3615 skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
3616 if (!skb)
3617 break;
3618
3619 mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
3620 }
3621
3622 /* Update BM driver with number of buffers added to pool */
3623 bm_pool->buf_num += i;
3624 bm_pool->in_use_thresh = bm_pool->buf_num / 4;
3625
3626 netdev_dbg(port->dev,
3627 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
3628 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3629 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
3630
3631 netdev_dbg(port->dev,
3632 "%s pool %d: %d of %d buffers added\n",
3633 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3634 bm_pool->id, i, buf_num);
3635 return i;
3636}
3637
3638/* Notify the driver that BM pool is being used as a specific type and return
3639 * the pool pointer on success
3640 */
3641static struct mvpp2_bm_pool *
3642mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3643 int pkt_size)
3644{
3645 unsigned long flags = 0;
3646 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3647 int num;
3648
3649 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
3650 netdev_err(port->dev, "mixing pool types is forbidden\n");
3651 return NULL;
3652 }
3653
3654 spin_lock_irqsave(&new_pool->lock, flags);
3655
3656 if (new_pool->type == MVPP2_BM_FREE)
3657 new_pool->type = type;
3658
3659 /* Allocate buffers in case BM pool is used as long pool, but packet
3660	 * size doesn't match MTU or BM pool hasn't been used yet
3661 */
3662 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
3663 (new_pool->pkt_size == 0)) {
3664 int pkts_num;
3665
3666 /* Set default buffer number or free all the buffers in case
3667 * the pool is not empty
3668 */
3669 pkts_num = new_pool->buf_num;
3670 if (pkts_num == 0)
3671 pkts_num = type == MVPP2_BM_SWF_LONG ?
3672 MVPP2_BM_LONG_BUF_NUM :
3673 MVPP2_BM_SHORT_BUF_NUM;
3674 else
3675 mvpp2_bm_bufs_free(port->priv, new_pool, pkts_num);
3676
3677 new_pool->pkt_size = pkt_size;
3678
3679 /* Allocate buffers for this pool */
3680 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
3681 if (num != pkts_num) {
3682 WARN(1, "pool %d: %d of %d allocated\n",
3683 new_pool->id, num, pkts_num);
3684 /* We need to undo the bufs_add() allocations */
3685 spin_unlock_irqrestore(&new_pool->lock, flags);
3686 return NULL;
3687 }
3688 }
3689
3690 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3691 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3692
3693 spin_unlock_irqrestore(&new_pool->lock, flags);
3694
3695 return new_pool;
3696}
3697
3698/* Initialize pools for swf */
3699static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3700{
3701 unsigned long flags = 0;
3702 int rxq;
3703
3704 if (!port->pool_long) {
3705 port->pool_long =
3706 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
3707 MVPP2_BM_SWF_LONG,
3708 port->pkt_size);
3709 if (!port->pool_long)
3710 return -ENOMEM;
3711
3712 spin_lock_irqsave(&port->pool_long->lock, flags);
3713 port->pool_long->port_map |= (1 << port->id);
3714 spin_unlock_irqrestore(&port->pool_long->lock, flags);
3715
3716 for (rxq = 0; rxq < rxq_number; rxq++)
3717 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
3718 }
3719
3720 if (!port->pool_short) {
3721 port->pool_short =
3722 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
3723 MVPP2_BM_SWF_SHORT,
3724 MVPP2_BM_SHORT_PKT_SIZE);
3725 if (!port->pool_short)
3726 return -ENOMEM;
3727
3728 spin_lock_irqsave(&port->pool_short->lock, flags);
3729 port->pool_short->port_map |= (1 << port->id);
3730 spin_unlock_irqrestore(&port->pool_short->lock, flags);
3731
3732 for (rxq = 0; rxq < rxq_number; rxq++)
3733 mvpp2_rxq_short_pool_set(port, rxq,
3734 port->pool_short->id);
3735 }
3736
3737 return 0;
3738}
3739
3740static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
3741{
3742 struct mvpp2_port *port = netdev_priv(dev);
3743 struct mvpp2_bm_pool *port_pool = port->pool_long;
3744 int num, pkts_num = port_pool->buf_num;
3745 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3746
3747 /* Update BM pool with new buffer size */
3748 num = mvpp2_bm_bufs_free(port->priv, port_pool, pkts_num);
3749 if (num != pkts_num) {
3750 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
3751 return -EIO;
3752 }
3753
3754 port_pool->pkt_size = pkt_size;
3755 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
3756 if (num != pkts_num) {
3757 WARN(1, "pool %d: %d of %d allocated\n",
3758 port_pool->id, num, pkts_num);
3759 return -EIO;
3760 }
3761
3762 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
3763 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
3764 dev->mtu = mtu;
3765 netdev_update_features(dev);
3766 return 0;
3767}
3768
3769static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
3770{
3771 int cpu, cpu_mask = 0;
3772
3773 for_each_present_cpu(cpu)
3774 cpu_mask |= 1 << cpu;
3775 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3776 MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
3777}
3778
3779static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
3780{
3781 int cpu, cpu_mask = 0;
3782
3783 for_each_present_cpu(cpu)
3784 cpu_mask |= 1 << cpu;
3785 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3786 MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
3787}
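/* Example: on a two-CPU system cpu_mask is 0x3, so the writes above
 * enable or disable the per-CPU interrupt lines of CPU 0 and CPU 1 in a
 * single register access.
 */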
3788
3789/* Mask the current CPU's Rx/Tx interrupts */
3790static void mvpp2_interrupts_mask(void *arg)
3791{
3792 struct mvpp2_port *port = arg;
3793
3794 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
3795}
3796
3797/* Unmask the current CPU's Rx/Tx interrupts */
3798static void mvpp2_interrupts_unmask(void *arg)
3799{
3800 struct mvpp2_port *port = arg;
3801
3802 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3803 (MVPP2_CAUSE_MISC_SUM_MASK |
3804 MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
3805 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3806}
3807
3808/* Port configuration routines */
3809
3810static void mvpp2_port_mii_set(struct mvpp2_port *port)
3811{
3812 u32 reg, val = 0;
3813
3814 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
3815 val = MVPP2_GMAC_PCS_ENABLE_MASK |
3816 MVPP2_GMAC_INBAND_AN_MASK;
3817 else if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
3818 val = MVPP2_GMAC_PORT_RGMII_MASK;
3819
3820 reg = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3821 writel(reg | val, port->base + MVPP2_GMAC_CTRL_2_REG);
3822}
3823
3824static void mvpp2_port_enable(struct mvpp2_port *port)
3825{
3826 u32 val;
3827
3828 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3829 val |= MVPP2_GMAC_PORT_EN_MASK;
3830 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3831 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3832}
3833
3834static void mvpp2_port_disable(struct mvpp2_port *port)
3835{
3836 u32 val;
3837
3838 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3839 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
3840 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3841}
3842
3843/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
3844static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
3845{
3846 u32 val;
3847
3848 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
3849 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3850 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3851}
3852
3853/* Configure loopback port */
3854static void mvpp2_port_loopback_set(struct mvpp2_port *port)
3855{
3856 u32 val;
3857
3858 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3859
3860 if (port->speed == 1000)
3861 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
3862 else
3863 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
3864
3865 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
3866 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
3867 else
3868 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
3869
3870 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3871}
3872
3873static void mvpp2_port_reset(struct mvpp2_port *port)
3874{
3875 u32 val;
3876
3877 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3878 ~MVPP2_GMAC_PORT_RESET_MASK;
3879 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3880
3881 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3882 MVPP2_GMAC_PORT_RESET_MASK)
3883 continue;
3884}
3885
3886/* Change maximum receive size of the port */
3887static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
3888{
3889 u32 val;
3890
3891 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3892 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
3893 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
3894 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
3895 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3896}
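/* Example (assuming MVPP2_MH_SIZE is 2, the Marvell header length): a
 * pkt_size of 1536 programs (1536 - 2) / 2 = 767 into the field, i.e.
 * the GMAC maximum RX size appears to be expressed in 2-byte units.
 */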
3897
3898/* Set defaults to the MVPP2 port */
3899static void mvpp2_defaults_set(struct mvpp2_port *port)
3900{
3901 int tx_port_num, val, queue, ptxq, lrxq;
3902
3903 /* Configure port to loopback if needed */
3904 if (port->flags & MVPP2_F_LOOPBACK)
3905 mvpp2_port_loopback_set(port);
3906
3907 /* Update TX FIFO MIN Threshold */
3908 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3909 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3910 /* Min. TX threshold must be less than minimal packet length */
3911 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3912 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3913
3914 /* Disable Legacy WRR, Disable EJP, Release from reset */
3915 tx_port_num = mvpp2_egress_port(port);
3916 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3917 tx_port_num);
3918 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3919
3920 /* Close bandwidth for all queues */
3921 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
3922 ptxq = mvpp2_txq_phys(port->id, queue);
3923 mvpp2_write(port->priv,
3924 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
3925 }
3926
3927 /* Set refill period to 1 usec, refill tokens
3928 * and bucket size to maximum
3929 */
3930 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
3931 port->priv->tclk / USEC_PER_SEC);
3932 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
3933 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3934 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3935 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3936 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
3937 val = MVPP2_TXP_TOKEN_SIZE_MAX;
3938 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3939
3940 /* Set MaximumLowLatencyPacketSize value to 256 */
3941 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
3942 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3943 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3944
3945 /* Enable Rx cache snoop */
3946 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3947 queue = port->rxqs[lrxq]->id;
3948 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3949 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
3950 MVPP2_SNOOP_BUF_HDR_MASK;
3951 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3952 }
3953
3954	/* By default, mask all interrupts for all present CPUs */
3955 mvpp2_interrupts_disable(port);
3956}
3957
3958/* Enable/disable receiving packets */
3959static void mvpp2_ingress_enable(struct mvpp2_port *port)
3960{
3961 u32 val;
3962 int lrxq, queue;
3963
3964 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3965 queue = port->rxqs[lrxq]->id;
3966 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3967 val &= ~MVPP2_RXQ_DISABLE_MASK;
3968 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3969 }
3970}
3971
3972static void mvpp2_ingress_disable(struct mvpp2_port *port)
3973{
3974 u32 val;
3975 int lrxq, queue;
3976
3977 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3978 queue = port->rxqs[lrxq]->id;
3979 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3980 val |= MVPP2_RXQ_DISABLE_MASK;
3981 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3982 }
3983}
3984
3985/* Enable transmit via physical egress queue
3986 * - HW starts taking descriptors from DRAM
3987 */
3988static void mvpp2_egress_enable(struct mvpp2_port *port)
3989{
3990 u32 qmap;
3991 int queue;
3992 int tx_port_num = mvpp2_egress_port(port);
3993
3994 /* Enable all initialized TXs. */
3995 qmap = 0;
3996 for (queue = 0; queue < txq_number; queue++) {
3997 struct mvpp2_tx_queue *txq = port->txqs[queue];
3998
3999 if (txq->descs != NULL)
4000 qmap |= (1 << queue);
4001 }
4002
4003 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4004 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4005}
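/* Example: if only TXQ 0 and TXQ 2 have been initialized (non-NULL
 * descs), qmap is 0b101 = 0x5 and only those two queues are started.
 */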
4006
4007/* Disable transmit via physical egress queue
4008 * - HW doesn't take descriptors from DRAM
4009 */
4010static void mvpp2_egress_disable(struct mvpp2_port *port)
4011{
4012 u32 reg_data;
4013 int delay;
4014 int tx_port_num = mvpp2_egress_port(port);
4015
4016 /* Issue stop command for active channels only */
4017 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4018 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4019 MVPP2_TXP_SCHED_ENQ_MASK;
4020 if (reg_data != 0)
4021 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4022 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4023
4024 /* Wait for all Tx activity to terminate. */
4025 delay = 0;
4026 do {
4027 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4028 netdev_warn(port->dev,
4029 "Tx stop timed out, status=0x%08x\n",
4030 reg_data);
4031 break;
4032 }
4033 mdelay(1);
4034 delay++;
4035
4036		/* Check the port TX Command register to verify that all
4037		 * Tx queues have stopped
4038 */
4039 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4040 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4041}
4042
4043/* Rx descriptors helper methods */
4044
4045/* Get number of Rx descriptors occupied by received packets */
4046static inline int
4047mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4048{
4049 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4050
4051 return val & MVPP2_RXQ_OCCUPIED_MASK;
4052}
4053
4054/* Update Rx queue status with the number of occupied and available
4055 * Rx descriptor slots.
4056 */
4057static inline void
4058mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4059 int used_count, int free_count)
4060{
4061	/* Decrement the number of used descriptors and increment the
4062	 * number of free descriptors.
4063 */
4064 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4065
4066 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4067}
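/* Example (assuming MVPP2_RXQ_NUM_NEW_OFFSET is 16): processing 8
 * descriptors and refilling 8 new ones writes 0x00080008, the "used"
 * count in the low half-word and the "new" count in the high half-word.
 */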
4068
4069/* Get pointer to next RX descriptor to be processed by SW */
4070static inline struct mvpp2_rx_desc *
4071mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4072{
4073 int rx_desc = rxq->next_desc_to_proc;
4074
4075 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4076 prefetch(rxq->descs + rxq->next_desc_to_proc);
4077 return rxq->descs + rx_desc;
4078}
4079
4080/* Set rx queue offset */
4081static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4082 int prxq, int offset)
4083{
4084 u32 val;
4085
4086 /* Convert offset from bytes to units of 32 bytes */
4087 offset = offset >> 5;
4088
4089 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4090 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4091
4092	/* Offset is now in units of 32 bytes */
4093 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4094 MVPP2_RXQ_PACKET_OFFSET_MASK);
4095
4096 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4097}
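/* Example: an offset of NET_SKB_PAD = 64 bytes (a common value) is
 * converted to 64 >> 5 = 2, since the packet offset field is programmed
 * in units of 32 bytes.
 */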
4098
4099/* Obtain BM cookie information from descriptor */
4100static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
4101{
4102 int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4103 MVPP2_RXD_BM_POOL_ID_OFFS;
4104 int cpu = smp_processor_id();
4105
4106 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
4107 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
4108}
4109
4110/* Tx descriptors helper methods */
4111
4112/* Get number of Tx descriptors waiting to be transmitted by HW */
4113static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
4114 struct mvpp2_tx_queue *txq)
4115{
4116 u32 val;
4117
4118 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4119 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4120
4121 return val & MVPP2_TXQ_PENDING_MASK;
4122}
4123
4124/* Get pointer to next Tx descriptor to be processed (send) by HW */
4125static struct mvpp2_tx_desc *
4126mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4127{
4128 int tx_desc = txq->next_desc_to_proc;
4129
4130 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4131 return txq->descs + tx_desc;
4132}
4133
4134/* Update HW with number of aggregated Tx descriptors to be sent */
4135static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4136{
4137 /* aggregated access - relevant TXQ number is written in TX desc */
4138 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
4139}
4140
4141
4142/* Check if there are enough free descriptors in aggregated txq.
4143 * If not, update the number of occupied descriptors and repeat the check.
4144 */
4145static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4146 struct mvpp2_tx_queue *aggr_txq, int num)
4147{
4148 if ((aggr_txq->count + num) > aggr_txq->size) {
4149 /* Update number of occupied aggregated Tx descriptors */
4150 int cpu = smp_processor_id();
4151 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4152
4153 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4154 }
4155
4156 if ((aggr_txq->count + num) > aggr_txq->size)
4157 return -ENOMEM;
4158
4159 return 0;
4160}
4161
4162/* Reserved Tx descriptors allocation request */
4163static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4164 struct mvpp2_tx_queue *txq, int num)
4165{
4166 u32 val;
4167
4168 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
4169 mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
4170
4171 val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
4172
4173 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4174}
4175
4176/* Check if there are enough reserved descriptors for transmission.
4177 * If not, request a chunk of reserved descriptors and check again.
4178 */
4179static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4180 struct mvpp2_tx_queue *txq,
4181 struct mvpp2_txq_pcpu *txq_pcpu,
4182 int num)
4183{
4184 int req, cpu, desc_count;
4185
4186 if (txq_pcpu->reserved_num >= num)
4187 return 0;
4188
4189 /* Not enough descriptors reserved! Update the reserved descriptor
4190 * count and check again.
4191 */
4192
4193 desc_count = 0;
4194 /* Compute total of used descriptors */
4195 for_each_present_cpu(cpu) {
4196 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4197
4198 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4199 desc_count += txq_pcpu_aux->count;
4200 desc_count += txq_pcpu_aux->reserved_num;
4201 }
4202
4203 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4204 desc_count += req;
4205
4206 if (desc_count >
4207 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4208 return -ENOMEM;
4209
4210 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4211
4212	/* OK, the descriptor count has been updated: check again. */
4213 if (txq_pcpu->reserved_num < num)
4214 return -ENOMEM;
4215 return 0;
4216}
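/* Worked example: if num = 10 descriptors are needed but only 4 are
 * reserved on this CPU, req = max(MVPP2_CPU_DESC_CHUNK, 6) descriptors
 * are requested from HW; the request is refused up front if granting it
 * could leave less than one MVPP2_CPU_DESC_CHUNK spare per present CPU.
 */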
4217
4218/* Release the last allocated Tx descriptor. Useful to handle DMA
4219 * mapping failures in the Tx path.
4220 */
4221static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4222{
4223 if (txq->next_desc_to_proc == 0)
4224 txq->next_desc_to_proc = txq->last_desc - 1;
4225 else
4226 txq->next_desc_to_proc--;
4227}
4228
4229/* Set Tx descriptors fields relevant for CSUM calculation */
4230static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4231 int ip_hdr_len, int l4_proto)
4232{
4233 u32 command;
4234
4235 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4236 * G_L4_chk, L4_type required only for checksum calculation
4237 */
4238 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4239 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4240 command |= MVPP2_TXD_IP_CSUM_DISABLE;
4241
4242 if (l3_proto == swab16(ETH_P_IP)) {
4243 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
4244 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
4245 } else {
4246 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
4247 }
4248
4249 if (l4_proto == IPPROTO_TCP) {
4250 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
4251 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4252 } else if (l4_proto == IPPROTO_UDP) {
4253 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
4254 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4255 } else {
4256 command |= MVPP2_TXD_L4_CSUM_NOT;
4257 }
4258
4259 return command;
4260}
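/* Example: for TCP over IPv4 behind a plain 14-byte Ethernet header
 * (l3_offs = 14, ip_hdr_len = 5 in 32-bit words, as passed by the
 * caller below), the command encodes the L3 offset and IP header
 * length, clears MVPP2_TXD_IP_CSUM_DISABLE and MVPP2_TXD_L3_IP6 to
 * request IPv4 checksum generation, and clears MVPP2_TXD_L4_UDP and
 * MVPP2_TXD_L4_CSUM_FRAG to request a full TCP checksum.
 */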
4261
4262/* Get number of sent descriptors and decrement counter.
4263 * The number of sent descriptors is returned.
4264 * Per-CPU access
4265 */
4266static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4267 struct mvpp2_tx_queue *txq)
4268{
4269 u32 val;
4270
4271 /* Reading status reg resets transmitted descriptor counter */
4272 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
4273
4274 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4275 MVPP2_TRANSMITTED_COUNT_OFFSET;
4276}
4277
4278static void mvpp2_txq_sent_counter_clear(void *arg)
4279{
4280 struct mvpp2_port *port = arg;
4281 int queue;
4282
4283 for (queue = 0; queue < txq_number; queue++) {
4284 int id = port->txqs[queue]->id;
4285
4286 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
4287 }
4288}
4289
4290/* Set max sizes for Tx queues */
4291static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4292{
4293 u32 val, size, mtu;
4294 int txq, tx_port_num;
4295
4296 mtu = port->pkt_size * 8;
4297 if (mtu > MVPP2_TXP_MTU_MAX)
4298 mtu = MVPP2_TXP_MTU_MAX;
4299
4300	/* Workaround for wrong Token bucket update: Set MTU value = 3 * real MTU value */
4301 mtu = 3 * mtu;
4302
4303 /* Indirect access to registers */
4304 tx_port_num = mvpp2_egress_port(port);
4305 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4306
4307 /* Set MTU */
4308 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4309 val &= ~MVPP2_TXP_MTU_MAX;
4310 val |= mtu;
4311 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4312
4313	/* TXP token size and all TXQs token size must be larger than MTU */
4314 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4315 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4316 if (size < mtu) {
4317 size = mtu;
4318 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4319 val |= size;
4320 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4321 }
4322
4323 for (txq = 0; txq < txq_number; txq++) {
4324 val = mvpp2_read(port->priv,
4325 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4326 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4327
4328 if (size < mtu) {
4329 size = mtu;
4330 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4331 val |= size;
4332 mvpp2_write(port->priv,
4333 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4334 val);
4335 }
4336 }
4337}
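/* Example: with pkt_size = 1536, mtu becomes 1536 * 8 = 12288 (the
 * token bucket apparently counts in bits), is clamped to
 * MVPP2_TXP_MTU_MAX if needed, and is then tripled to 36864 by the
 * workaround before being used as the MTU and minimum token size for
 * the port and all of its queues.
 */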
4338
4339/* Set the number of packets that will be received before Rx interrupt
4340 * will be generated by HW.
4341 */
4342static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
4343 struct mvpp2_rx_queue *rxq, u32 pkts)
4344{
4345 u32 val;
4346
4347 val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
4348 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4349 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
4350
4351 rxq->pkts_coal = pkts;
4352}
4353
4354/* Set the time delay in usec before Rx interrupt */
4355static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4356 struct mvpp2_rx_queue *rxq, u32 usec)
4357{
4358 u32 val;
4359
4360 val = (port->priv->tclk / USEC_PER_SEC) * usec;
4361 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
4362
4363 rxq->time_coal = usec;
4364}
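/* Example: assuming a 250 MHz tclk, a 100 usec delay is converted to
 * (250000000 / 1000000) * 100 = 25000 clock cycles before being written
 * to the ISR threshold register.
 */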
4365
4366/* Set threshold for TX_DONE pkts coalescing */
4367static void mvpp2_tx_done_pkts_coal_set(void *arg)
4368{
4369 struct mvpp2_port *port = arg;
4370 int queue;
4371 u32 val;
4372
4373 for (queue = 0; queue < txq_number; queue++) {
4374 struct mvpp2_tx_queue *txq = port->txqs[queue];
4375
4376 val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
4377 MVPP2_TRANSMITTED_THRESH_MASK;
4378 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4379 mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
4380 }
4381}
4382
4383/* Free Tx queue skbuffs */
4384static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4385 struct mvpp2_tx_queue *txq,
4386 struct mvpp2_txq_pcpu *txq_pcpu, int num)
4387{
4388 int i;
4389
4390 for (i = 0; i < num; i++) {
4391 struct mvpp2_tx_desc *tx_desc = txq->descs +
4392 txq_pcpu->txq_get_index;
4393 struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
4394
4395 mvpp2_txq_inc_get(txq_pcpu);
4396
4397 if (!skb)
4398 continue;
4399
4400 dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
4401 tx_desc->data_size, DMA_TO_DEVICE);
4402 dev_kfree_skb_any(skb);
4403 }
4404}
4405
4406static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4407 u32 cause)
4408{
4409 int queue = fls(cause) - 1;
4410
4411 return port->rxqs[queue];
4412}
4413
4414static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4415 u32 cause)
4416{
4417 int queue = fls(cause >> 16) - 1;
4418
4419 return port->txqs[queue];
4420}
4421
4422/* Handle end of transmission */
4423static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4424 struct mvpp2_txq_pcpu *txq_pcpu)
4425{
4426 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4427 int tx_done;
4428
4429 if (txq_pcpu->cpu != smp_processor_id())
4430 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
4431
4432 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4433 if (!tx_done)
4434 return;
4435 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4436
4437 txq_pcpu->count -= tx_done;
4438
4439 if (netif_tx_queue_stopped(nq))
4440 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4441 netif_tx_wake_queue(nq);
4442}
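/* The queue is woken only once at least MAX_SKB_FRAGS + 1 descriptors
 * are free again, i.e. enough room for one maximally fragmented skb;
 * this mirrors the stop condition at the end of mvpp2_tx() below.
 */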
4443
4444/* Rx/Tx queue initialization/cleanup methods */
4445
4446/* Allocate and initialize descriptors for aggr TXQ */
4447static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4448 struct mvpp2_tx_queue *aggr_txq,
4449 int desc_num, int cpu,
4450 struct mvpp2 *priv)
4451{
4452 /* Allocate memory for TX descriptors */
4453 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
4454 desc_num * MVPP2_DESC_ALIGNED_SIZE,
4455 &aggr_txq->descs_phys, GFP_KERNEL);
4456 if (!aggr_txq->descs)
4457 return -ENOMEM;
4458
4459 /* Make sure descriptor address is cache line size aligned */
4460 BUG_ON(aggr_txq->descs !=
4461 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4462
4463 aggr_txq->last_desc = aggr_txq->size - 1;
4464
4465	/* Workaround: the aggregated TXQ is not reset, so read back the current index */
4466 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4467 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4468
4469 /* Set Tx descriptors queue starting address */
4470 /* indirect access */
4471 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
4472 aggr_txq->descs_phys);
4473 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4474
4475 return 0;
4476}
4477
4478/* Create a specified Rx queue */
4479static int mvpp2_rxq_init(struct mvpp2_port *port,
4480 struct mvpp2_rx_queue *rxq)
4481
4482{
4483 rxq->size = port->rx_ring_size;
4484
4485 /* Allocate memory for RX descriptors */
4486 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
4487 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4488 &rxq->descs_phys, GFP_KERNEL);
4489 if (!rxq->descs)
4490 return -ENOMEM;
4491
4492 BUG_ON(rxq->descs !=
4493 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4494
4495 rxq->last_desc = rxq->size - 1;
4496
4497 /* Zero occupied and non-occupied counters - direct access */
4498 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4499
4500 /* Set Rx descriptors queue starting address - indirect access */
4501 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4502 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
4503 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4504 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
4505
4506 /* Set Offset */
4507 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4508
4509 /* Set coalescing pkts and time */
4510 mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
4511 mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
4512
4513 /* Add number of descriptors ready for receiving packets */
4514 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4515
4516 return 0;
4517}
4518
4519/* Push packets received by the RXQ to BM pool */
4520static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4521 struct mvpp2_rx_queue *rxq)
4522{
4523 int rx_received, i;
4524
4525 rx_received = mvpp2_rxq_received(port, rxq->id);
4526 if (!rx_received)
4527 return;
4528
4529 for (i = 0; i < rx_received; i++) {
4530 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4531 u32 bm = mvpp2_bm_cookie_build(rx_desc);
4532
4533 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
4534 rx_desc->buf_cookie);
4535 }
4536 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4537}
4538
4539/* Cleanup Rx queue */
4540static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4541 struct mvpp2_rx_queue *rxq)
4542{
4543 mvpp2_rxq_drop_pkts(port, rxq);
4544
4545 if (rxq->descs)
4546 dma_free_coherent(port->dev->dev.parent,
4547 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4548 rxq->descs,
4549 rxq->descs_phys);
4550
4551 rxq->descs = NULL;
4552 rxq->last_desc = 0;
4553 rxq->next_desc_to_proc = 0;
4554 rxq->descs_phys = 0;
4555
4556 /* Clear Rx descriptors queue starting address and size;
4557 * free descriptor number
4558 */
4559 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4560 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4561 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
4562 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
4563}
4564
4565/* Create and initialize a Tx queue */
4566static int mvpp2_txq_init(struct mvpp2_port *port,
4567 struct mvpp2_tx_queue *txq)
4568{
4569 u32 val;
4570 int cpu, desc, desc_per_txq, tx_port_num;
4571 struct mvpp2_txq_pcpu *txq_pcpu;
4572
4573 txq->size = port->tx_ring_size;
4574
4575 /* Allocate memory for Tx descriptors */
4576 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
4577 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4578 &txq->descs_phys, GFP_KERNEL);
4579 if (!txq->descs)
4580 return -ENOMEM;
4581
4582 /* Make sure descriptor address is cache line size aligned */
4583 BUG_ON(txq->descs !=
4584 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4585
4586 txq->last_desc = txq->size - 1;
4587
4588 /* Set Tx descriptors queue starting address - indirect access */
4589 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4590 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
4591 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
4592 MVPP2_TXQ_DESC_SIZE_MASK);
4593 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
4594 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
4595 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
4596 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4597 val &= ~MVPP2_TXQ_PENDING_MASK;
4598 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
4599
4600 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
4601 * for each existing TXQ.
4602 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
4603	 * GbE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
4604 */
4605 desc_per_txq = 16;
4606 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
4607 (txq->log_id * desc_per_txq);
4608
4609 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
4610 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
4611 MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
4612
4613 /* WRR / EJP configuration - indirect access */
4614 tx_port_num = mvpp2_egress_port(port);
4615 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4616
4617 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
4618 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
4619 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
4620 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
4621 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
4622
4623 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
4624 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
4625 val);
4626
4627 for_each_present_cpu(cpu) {
4628 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4629 txq_pcpu->size = txq->size;
4630 txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
4631 sizeof(*txq_pcpu->tx_skb),
4632 GFP_KERNEL);
4633 if (!txq_pcpu->tx_skb) {
4634 dma_free_coherent(port->dev->dev.parent,
4635 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4636 txq->descs, txq->descs_phys);
4637 return -ENOMEM;
4638 }
4639
4640 txq_pcpu->count = 0;
4641 txq_pcpu->reserved_num = 0;
4642 txq_pcpu->txq_put_index = 0;
4643 txq_pcpu->txq_get_index = 0;
4644 }
4645
4646 return 0;
4647}
4648
4649/* Free allocated TXQ resources */
4650static void mvpp2_txq_deinit(struct mvpp2_port *port,
4651 struct mvpp2_tx_queue *txq)
4652{
4653 struct mvpp2_txq_pcpu *txq_pcpu;
4654 int cpu;
4655
4656 for_each_present_cpu(cpu) {
4657 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4658 kfree(txq_pcpu->tx_skb);
4659 }
4660
4661 if (txq->descs)
4662 dma_free_coherent(port->dev->dev.parent,
4663 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4664 txq->descs, txq->descs_phys);
4665
4666 txq->descs = NULL;
4667 txq->last_desc = 0;
4668 txq->next_desc_to_proc = 0;
4669 txq->descs_phys = 0;
4670
4671 /* Set minimum bandwidth for disabled TXQs */
4672 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
4673
4674 /* Set Tx descriptors queue starting address and size */
4675 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4676 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
4677 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
4678}
4679
4680/* Cleanup Tx ports */
4681static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4682{
4683 struct mvpp2_txq_pcpu *txq_pcpu;
4684 int delay, pending, cpu;
4685 u32 val;
4686
4687 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4688 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4689 val |= MVPP2_TXQ_DRAIN_EN_MASK;
4690 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4691
4692 /* The napi queue has been stopped so wait for all packets
4693 * to be transmitted.
4694 */
4695 delay = 0;
4696 do {
4697 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
4698 netdev_warn(port->dev,
4699 "port %d: cleaning queue %d timed out\n",
4700 port->id, txq->log_id);
4701 break;
4702 }
4703 mdelay(1);
4704 delay++;
4705
4706 pending = mvpp2_txq_pend_desc_num_get(port, txq);
4707 } while (pending);
4708
4709 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4710 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4711
4712 for_each_present_cpu(cpu) {
4713 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4714
4715 /* Release all packets */
4716 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
4717
4718 /* Reset queue */
4719 txq_pcpu->count = 0;
4720 txq_pcpu->txq_put_index = 0;
4721 txq_pcpu->txq_get_index = 0;
4722 }
4723}
4724
4725/* Cleanup all Tx queues */
4726static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
4727{
4728 struct mvpp2_tx_queue *txq;
4729 int queue;
4730 u32 val;
4731
4732 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
4733
4734 /* Reset Tx ports and delete Tx queues */
4735 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
4736 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4737
4738 for (queue = 0; queue < txq_number; queue++) {
4739 txq = port->txqs[queue];
4740 mvpp2_txq_clean(port, txq);
4741 mvpp2_txq_deinit(port, txq);
4742 }
4743
4744 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4745
4746 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
4747 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4748}
4749
4750/* Cleanup all Rx queues */
4751static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
4752{
4753 int queue;
4754
4755 for (queue = 0; queue < rxq_number; queue++)
4756 mvpp2_rxq_deinit(port, port->rxqs[queue]);
4757}
4758
4759/* Init all Rx queues for port */
4760static int mvpp2_setup_rxqs(struct mvpp2_port *port)
4761{
4762 int queue, err;
4763
4764 for (queue = 0; queue < rxq_number; queue++) {
4765 err = mvpp2_rxq_init(port, port->rxqs[queue]);
4766 if (err)
4767 goto err_cleanup;
4768 }
4769 return 0;
4770
4771err_cleanup:
4772 mvpp2_cleanup_rxqs(port);
4773 return err;
4774}
4775
4776/* Init all tx queues for port */
4777static int mvpp2_setup_txqs(struct mvpp2_port *port)
4778{
4779 struct mvpp2_tx_queue *txq;
4780 int queue, err;
4781
4782 for (queue = 0; queue < txq_number; queue++) {
4783 txq = port->txqs[queue];
4784 err = mvpp2_txq_init(port, txq);
4785 if (err)
4786 goto err_cleanup;
4787 }
4788
4789 on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
4790 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4791 return 0;
4792
4793err_cleanup:
4794 mvpp2_cleanup_txqs(port);
4795 return err;
4796}
4797
4798/* The callback for per-port interrupt */
4799static irqreturn_t mvpp2_isr(int irq, void *dev_id)
4800{
4801 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
4802
4803 mvpp2_interrupts_disable(port);
4804
4805 napi_schedule(&port->napi);
4806
4807 return IRQ_HANDLED;
4808}
4809
4810/* Adjust link */
4811static void mvpp2_link_event(struct net_device *dev)
4812{
4813 struct mvpp2_port *port = netdev_priv(dev);
4814 struct phy_device *phydev = port->phy_dev;
4815 int status_change = 0;
4816 u32 val;
4817
4818 if (phydev->link) {
4819 if ((port->speed != phydev->speed) ||
4820 (port->duplex != phydev->duplex)) {
4821 u32 val;
4822
4823 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4824 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
4825 MVPP2_GMAC_CONFIG_GMII_SPEED |
4826 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
4827 MVPP2_GMAC_AN_SPEED_EN |
4828 MVPP2_GMAC_AN_DUPLEX_EN);
4829
4830 if (phydev->duplex)
4831 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4832
4833 if (phydev->speed == SPEED_1000)
4834 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
4835 else
4836 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
4837
4838 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4839
4840 port->duplex = phydev->duplex;
4841 port->speed = phydev->speed;
4842 }
4843 }
4844
4845 if (phydev->link != port->link) {
4846 if (!phydev->link) {
4847 port->duplex = -1;
4848 port->speed = 0;
4849 }
4850
4851 port->link = phydev->link;
4852 status_change = 1;
4853 }
4854
4855 if (status_change) {
4856 if (phydev->link) {
4857 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4858 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
4859 MVPP2_GMAC_FORCE_LINK_DOWN);
4860 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4861 mvpp2_egress_enable(port);
4862 mvpp2_ingress_enable(port);
4863 } else {
4864 mvpp2_ingress_disable(port);
4865 mvpp2_egress_disable(port);
4866 }
4867 phy_print_status(phydev);
4868 }
4869}
4870
4871/* Main RX/TX processing routines */
4872
4873/* Display more error info */
4874static void mvpp2_rx_error(struct mvpp2_port *port,
4875 struct mvpp2_rx_desc *rx_desc)
4876{
4877 u32 status = rx_desc->status;
4878
4879 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
4880 case MVPP2_RXD_ERR_CRC:
4881 netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
4882 status, rx_desc->data_size);
4883 break;
4884 case MVPP2_RXD_ERR_OVERRUN:
4885 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
4886 status, rx_desc->data_size);
4887 break;
4888 case MVPP2_RXD_ERR_RESOURCE:
4889 netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
4890 status, rx_desc->data_size);
4891 break;
4892 }
4893}
4894
4895/* Handle RX checksum offload */
4896static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
4897 struct sk_buff *skb)
4898{
4899 if (((status & MVPP2_RXD_L3_IP4) &&
4900 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
4901 (status & MVPP2_RXD_L3_IP6))
4902 if (((status & MVPP2_RXD_L4_UDP) ||
4903 (status & MVPP2_RXD_L4_TCP)) &&
4904 (status & MVPP2_RXD_L4_CSUM_OK)) {
4905 skb->csum = 0;
4906 skb->ip_summed = CHECKSUM_UNNECESSARY;
4907 return;
4908 }
4909
4910 skb->ip_summed = CHECKSUM_NONE;
4911}
4912
4913/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
4914static int mvpp2_rx_refill(struct mvpp2_port *port,
4915 struct mvpp2_bm_pool *bm_pool,
4916 u32 bm, int is_recycle)
4917{
4918 struct sk_buff *skb;
4919 dma_addr_t phys_addr;
4920
4921 if (is_recycle &&
4922 (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
4923 return 0;
4924
4925 /* No recycle or too many buffers are in use, so allocate a new skb */
4926 skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
4927 if (!skb)
4928 return -ENOMEM;
4929
4930 mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
4931 atomic_dec(&bm_pool->in_use);
4932 return 0;
4933}
4934
4935/* Handle tx checksum */
4936static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
4937{
4938 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4939 int ip_hdr_len = 0;
4940 u8 l4_proto;
4941
4942 if (skb->protocol == htons(ETH_P_IP)) {
4943 struct iphdr *ip4h = ip_hdr(skb);
4944
4945 /* Calculate IPv4 checksum and L4 checksum */
4946 ip_hdr_len = ip4h->ihl;
4947 l4_proto = ip4h->protocol;
4948 } else if (skb->protocol == htons(ETH_P_IPV6)) {
4949 struct ipv6hdr *ip6h = ipv6_hdr(skb);
4950
4951			/* Read l4_protocol from one of IPv6 extension headers */
4952 if (skb_network_header_len(skb) > 0)
4953 ip_hdr_len = (skb_network_header_len(skb) >> 2);
4954 l4_proto = ip6h->nexthdr;
4955 } else {
4956 return MVPP2_TXD_L4_CSUM_NOT;
4957 }
4958
4959 return mvpp2_txq_desc_csum(skb_network_offset(skb),
4960 skb->protocol, ip_hdr_len, l4_proto);
4961 }
4962
4963 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
4964}
4965
4966static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
4967 struct mvpp2_rx_desc *rx_desc)
4968{
4969 struct mvpp2_buff_hdr *buff_hdr;
4970 struct sk_buff *skb;
4971 u32 rx_status = rx_desc->status;
4972 u32 buff_phys_addr;
4973 u32 buff_virt_addr;
4974 u32 buff_phys_addr_next;
4975 u32 buff_virt_addr_next;
4976 int mc_id;
4977 int pool_id;
4978
4979 pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4980 MVPP2_RXD_BM_POOL_ID_OFFS;
4981 buff_phys_addr = rx_desc->buf_phys_addr;
4982 buff_virt_addr = rx_desc->buf_cookie;
4983
4984 do {
4985 skb = (struct sk_buff *)buff_virt_addr;
4986 buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
4987
4988 mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
4989
4990 buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
4991 buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
4992
4993 /* Release buffer */
4994 mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
4995 buff_virt_addr, mc_id);
4996
4997 buff_phys_addr = buff_phys_addr_next;
4998 buff_virt_addr = buff_virt_addr_next;
4999
5000 } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
5001}
5002
5003/* Main rx processing */
5004static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5005 struct mvpp2_rx_queue *rxq)
5006{
5007 struct net_device *dev = port->dev;
5008 int rx_received, rx_filled, i;
5009 u32 rcvd_pkts = 0;
5010 u32 rcvd_bytes = 0;
5011
5012	/* Get the number of received packets and clamp rx_todo to it */
5013 rx_received = mvpp2_rxq_received(port, rxq->id);
5014 if (rx_todo > rx_received)
5015 rx_todo = rx_received;
5016
5017 rx_filled = 0;
5018 for (i = 0; i < rx_todo; i++) {
5019 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5020 struct mvpp2_bm_pool *bm_pool;
5021 struct sk_buff *skb;
5022 u32 bm, rx_status;
5023 int pool, rx_bytes, err;
5024
5025 rx_filled++;
5026 rx_status = rx_desc->status;
5027 rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
5028
5029 bm = mvpp2_bm_cookie_build(rx_desc);
5030 pool = mvpp2_bm_cookie_pool_get(bm);
5031 bm_pool = &port->priv->bm_pools[pool];
5032 /* Check if buffer header is used */
5033 if (rx_status & MVPP2_RXD_BUF_HDR) {
5034 mvpp2_buff_hdr_rx(port, rx_desc);
5035 continue;
5036 }
5037
5038 /* In case of an error, release the requested buffer pointer
5039 * to the Buffer Manager. This request process is controlled
5040 * by the hardware, and the information about the buffer is
5041		 * taken from the RX descriptor.
5042 */
5043 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5044 dev->stats.rx_errors++;
5045 mvpp2_rx_error(port, rx_desc);
5046 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
5047 rx_desc->buf_cookie);
5048 continue;
5049 }
5050
5051 skb = (struct sk_buff *)rx_desc->buf_cookie;
5052
5053 rcvd_pkts++;
5054 rcvd_bytes += rx_bytes;
5055 atomic_inc(&bm_pool->in_use);
5056
5057 skb_reserve(skb, MVPP2_MH_SIZE);
5058 skb_put(skb, rx_bytes);
5059 skb->protocol = eth_type_trans(skb, dev);
5060 mvpp2_rx_csum(port, rx_status, skb);
5061
5062 napi_gro_receive(&port->napi, skb);
5063
5064 err = mvpp2_rx_refill(port, bm_pool, bm, 0);
5065 if (err) {
5066 netdev_err(port->dev, "failed to refill BM pools\n");
5067 rx_filled--;
5068 }
5069 }
5070
5071 if (rcvd_pkts) {
5072 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5073
5074 u64_stats_update_begin(&stats->syncp);
5075 stats->rx_packets += rcvd_pkts;
5076 stats->rx_bytes += rcvd_bytes;
5077 u64_stats_update_end(&stats->syncp);
5078 }
5079
5080 /* Update Rx queue management counters */
5081 wmb();
5082 mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);
5083
5084 return rx_todo;
5085}
5086
5087static inline void
5088tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
5089 struct mvpp2_tx_desc *desc)
5090{
5091 dma_unmap_single(dev, desc->buf_phys_addr,
5092 desc->data_size, DMA_TO_DEVICE);
5093 mvpp2_txq_desc_put(txq);
5094}
5095
5096/* Handle tx fragmentation processing */
5097static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5098 struct mvpp2_tx_queue *aggr_txq,
5099 struct mvpp2_tx_queue *txq)
5100{
5101 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5102 struct mvpp2_tx_desc *tx_desc;
5103 int i;
5104 dma_addr_t buf_phys_addr;
5105
5106 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5107 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5108 void *addr = page_address(frag->page.p) + frag->page_offset;
5109
5110 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5111 tx_desc->phys_txq = txq->id;
5112 tx_desc->data_size = frag->size;
5113
5114 buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
5115 tx_desc->data_size,
5116 DMA_TO_DEVICE);
5117 if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
5118 mvpp2_txq_desc_put(txq);
5119 goto error;
5120 }
5121
5122 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5123 tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
5124
5125 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5126 /* Last descriptor */
5127 tx_desc->command = MVPP2_TXD_L_DESC;
5128 mvpp2_txq_inc_put(txq_pcpu, skb);
5129 } else {
5130 /* Descriptor in the middle: Not First, Not Last */
5131 tx_desc->command = 0;
5132 mvpp2_txq_inc_put(txq_pcpu, NULL);
5133 }
5134 }
5135
5136 return 0;
5137
5138error:
5139 /* Release all descriptors that were used to map fragments of
5140 * this packet, as well as the corresponding DMA mappings
5141 */
5142 for (i = i - 1; i >= 0; i--) {
5143 tx_desc = txq->descs + i;
5144 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5145 }
5146
5147 return -ENOMEM;
5148}
5149
5150/* Main tx processing */
5151static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5152{
5153 struct mvpp2_port *port = netdev_priv(dev);
5154 struct mvpp2_tx_queue *txq, *aggr_txq;
5155 struct mvpp2_txq_pcpu *txq_pcpu;
5156 struct mvpp2_tx_desc *tx_desc;
5157 dma_addr_t buf_phys_addr;
5158 int frags = 0;
5159 u16 txq_id;
5160 u32 tx_cmd;
5161
5162 txq_id = skb_get_queue_mapping(skb);
5163 txq = port->txqs[txq_id];
5164 txq_pcpu = this_cpu_ptr(txq->pcpu);
5165 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5166
5167 frags = skb_shinfo(skb)->nr_frags + 1;
5168
5169 /* Check number of available descriptors */
5170 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5171 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5172 txq_pcpu, frags)) {
5173 frags = 0;
5174 goto out;
5175 }
5176
5177 /* Get a descriptor for the first part of the packet */
5178 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5179 tx_desc->phys_txq = txq->id;
5180 tx_desc->data_size = skb_headlen(skb);
5181
5182 buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
5183 tx_desc->data_size, DMA_TO_DEVICE);
5184 if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
5185 mvpp2_txq_desc_put(txq);
5186 frags = 0;
5187 goto out;
5188 }
5189 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5190 tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
5191
5192 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5193
5194 if (frags == 1) {
5195 /* First and Last descriptor */
5196 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5197 tx_desc->command = tx_cmd;
5198 mvpp2_txq_inc_put(txq_pcpu, skb);
5199 } else {
5200 /* First but not Last */
5201 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5202 tx_desc->command = tx_cmd;
5203 mvpp2_txq_inc_put(txq_pcpu, NULL);
5204
5205 /* Continue with other skb fragments */
5206 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
5207 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5208 frags = 0;
5209 goto out;
5210 }
5211 }
5212
5213 txq_pcpu->reserved_num -= frags;
5214 txq_pcpu->count += frags;
5215 aggr_txq->count += frags;
5216
5217 /* Enable transmit */
5218 wmb();
5219 mvpp2_aggr_txq_pend_desc_add(port, frags);
5220
5221 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5222 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5223
5224 netif_tx_stop_queue(nq);
5225 }
5226out:
5227 if (frags > 0) {
5228 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5229
5230 u64_stats_update_begin(&stats->syncp);
5231 stats->tx_packets++;
5232 stats->tx_bytes += skb->len;
5233 u64_stats_update_end(&stats->syncp);
5234 } else {
5235 dev->stats.tx_dropped++;
5236 dev_kfree_skb_any(skb);
5237 }
5238
5239 return NETDEV_TX_OK;
5240}
5241
5242static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5243{
5244 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5245 netdev_err(dev, "FCS error\n");
5246 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5247 netdev_err(dev, "rx fifo overrun error\n");
5248 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5249 netdev_err(dev, "tx fifo underrun error\n");
5250}
5251
5252static void mvpp2_txq_done_percpu(void *arg)
5253{
5254 struct mvpp2_port *port = arg;
5255 u32 cause_rx_tx, cause_tx, cause_misc;
5256
5257 /* Rx/Tx cause register
5258 *
5259 * Bits 0-15: each bit indicates received packets on the Rx queue
5260 * (bit 0 is for Rx queue 0).
5261 *
5262 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5263 * (bit 16 is for Tx queue 0).
5264 *
5265 * Each CPU has its own Rx/Tx cause register
5266 */
5267 cause_rx_tx = mvpp2_read(port->priv,
5268 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5269 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5270 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5271
5272 if (cause_misc) {
5273 mvpp2_cause_error(port->dev, cause_misc);
5274
5275 /* Clear the cause register */
5276 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
5277 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5278 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5279 }
5280
5281 /* Release TX descriptors */
5282 if (cause_tx) {
5283 struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
5284 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5285
5286 if (txq_pcpu->count)
5287 mvpp2_txq_done(port, txq, txq_pcpu);
5288 }
5289}
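
/* Illustrative sketch, not part of the driver: given the per-CPU cause
 * layout described above (Rx queues in bits 0-15, Tx queues in bits
 * 16-23), a pending queue index can be recovered from the lowest set
 * bit. The helper name below is hypothetical.
 */
static inline int example_cause_to_queue(u32 cause, int shift)
{
	cause >>= shift;	/* shift = 0 for the Rx bits, 16 for the Tx bits */
	return cause ? ffs(cause) - 1 : -1;	/* -1: nothing pending */
}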
5290
5291static int mvpp2_poll(struct napi_struct *napi, int budget)
5292{
5293 u32 cause_rx_tx, cause_rx;
5294 int rx_done = 0;
5295 struct mvpp2_port *port = netdev_priv(napi->dev);
5296
5297 on_each_cpu(mvpp2_txq_done_percpu, port, 1);
5298
5299 cause_rx_tx = mvpp2_read(port->priv,
5300 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5301 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5302
5303 /* Process RX packets */
5304 cause_rx |= port->pending_cause_rx;
5305 while (cause_rx && budget > 0) {
5306 int count;
5307 struct mvpp2_rx_queue *rxq;
5308
5309 rxq = mvpp2_get_rx_queue(port, cause_rx);
5310 if (!rxq)
5311 break;
5312
5313 count = mvpp2_rx(port, budget, rxq);
5314 rx_done += count;
5315 budget -= count;
5316 if (budget > 0) {
5317 /* Clear the bit associated to this Rx queue
5318 * so that next iteration will continue from
5319 * the next Rx queue.
5320 */
5321 cause_rx &= ~(1 << rxq->logic_rxq);
5322 }
5323 }
5324
5325 if (budget > 0) {
5326 cause_rx = 0;
5327 napi_complete(napi);
5328
5329 mvpp2_interrupts_enable(port);
5330 }
5331 port->pending_cause_rx = cause_rx;
5332 return rx_done;
5333}
5334
5335/* Set hw internals when starting port */
5336static void mvpp2_start_dev(struct mvpp2_port *port)
5337{
5338 mvpp2_gmac_max_rx_size_set(port);
5339 mvpp2_txp_max_tx_size_set(port);
5340
5341 napi_enable(&port->napi);
5342
5343 /* Enable interrupts on all CPUs */
5344 mvpp2_interrupts_enable(port);
5345
5346 mvpp2_port_enable(port);
5347 phy_start(port->phy_dev);
5348 netif_tx_start_all_queues(port->dev);
5349}
5350
5351/* Set hw internals when stopping port */
5352static void mvpp2_stop_dev(struct mvpp2_port *port)
5353{
5354	/* Stop new packets from arriving at the RXQs */
5355 mvpp2_ingress_disable(port);
5356
5357 mdelay(10);
5358
5359 /* Disable interrupts on all CPUs */
5360 mvpp2_interrupts_disable(port);
5361
5362 napi_disable(&port->napi);
5363
5364 netif_carrier_off(port->dev);
5365 netif_tx_stop_all_queues(port->dev);
5366
5367 mvpp2_egress_disable(port);
5368 mvpp2_port_disable(port);
5369 phy_stop(port->phy_dev);
5370}
5371
5372/* Return a positive (possibly rounded) MTU value if valid, or a negative error */
5373static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
5374{
5375 if (mtu < 68) {
5376 netdev_err(dev, "cannot change mtu to less than 68\n");
5377 return -EINVAL;
5378 }
5379
5380 /* 9676 == 9700 - 20 and rounding to 8 */
5381 if (mtu > 9676) {
5382 netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
5383 mtu = 9676;
5384 }
5385
5386 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
5387 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
5388 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5389 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
5390 }
5391
5392 return mtu;
5393}
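
/* Illustrative sketch, not part of the driver: the MTU check above rounds
 * the resulting Rx packet size up to a multiple of 8 with the standard
 * IS_ALIGNED()/ALIGN() helpers, e.g. a packet size of 9018 bytes would be
 * rounded up to 9024. The helper name and the example value are made up.
 */
static inline int example_round_pkt_size(int pkt_size)
{
	if (!IS_ALIGNED(pkt_size, 8))		/* e.g. 9018 is not aligned */
		pkt_size = ALIGN(pkt_size, 8);	/* 9018 -> 9024 */
	return pkt_size;
}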
5394
5395static int mvpp2_check_ringparam_valid(struct net_device *dev,
5396 struct ethtool_ringparam *ring)
5397{
5398 u16 new_rx_pending = ring->rx_pending;
5399 u16 new_tx_pending = ring->tx_pending;
5400
5401 if (ring->rx_pending == 0 || ring->tx_pending == 0)
5402 return -EINVAL;
5403
5404 if (ring->rx_pending > MVPP2_MAX_RXD)
5405 new_rx_pending = MVPP2_MAX_RXD;
5406 else if (!IS_ALIGNED(ring->rx_pending, 16))
5407 new_rx_pending = ALIGN(ring->rx_pending, 16);
5408
5409 if (ring->tx_pending > MVPP2_MAX_TXD)
5410 new_tx_pending = MVPP2_MAX_TXD;
5411 else if (!IS_ALIGNED(ring->tx_pending, 32))
5412 new_tx_pending = ALIGN(ring->tx_pending, 32);
5413
5414 if (ring->rx_pending != new_rx_pending) {
5415 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
5416 ring->rx_pending, new_rx_pending);
5417 ring->rx_pending = new_rx_pending;
5418 }
5419
5420 if (ring->tx_pending != new_tx_pending) {
5421 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
5422 ring->tx_pending, new_tx_pending);
5423 ring->tx_pending = new_tx_pending;
5424 }
5425
5426 return 0;
5427}
5428
5429static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
5430{
5431 u32 mac_addr_l, mac_addr_m, mac_addr_h;
5432
5433 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5434 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
5435 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
5436 addr[0] = (mac_addr_h >> 24) & 0xFF;
5437 addr[1] = (mac_addr_h >> 16) & 0xFF;
5438 addr[2] = (mac_addr_h >> 8) & 0xFF;
5439 addr[3] = mac_addr_h & 0xFF;
5440 addr[4] = mac_addr_m & 0xFF;
5441 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
5442}
5443
5444static int mvpp2_phy_connect(struct mvpp2_port *port)
5445{
5446 struct phy_device *phy_dev;
5447
5448 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
5449 port->phy_interface);
5450 if (!phy_dev) {
5451 netdev_err(port->dev, "cannot connect to phy\n");
5452 return -ENODEV;
5453 }
5454 phy_dev->supported &= PHY_GBIT_FEATURES;
5455 phy_dev->advertising = phy_dev->supported;
5456
5457 port->phy_dev = phy_dev;
5458 port->link = 0;
5459 port->duplex = 0;
5460 port->speed = 0;
5461
5462 return 0;
5463}
5464
5465static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5466{
5467 phy_disconnect(port->phy_dev);
5468 port->phy_dev = NULL;
5469}
5470
5471static int mvpp2_open(struct net_device *dev)
5472{
5473 struct mvpp2_port *port = netdev_priv(dev);
5474 unsigned char mac_bcast[ETH_ALEN] = {
5475 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5476 int err;
5477
5478 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
5479 if (err) {
5480 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
5481 return err;
5482 }
5483 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
5484 dev->dev_addr, true);
5485 if (err) {
5486		netdev_err(dev, "mvpp2_prs_mac_da_accept own MAC failed\n");
5487 return err;
5488 }
5489 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
5490 if (err) {
5491 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
5492 return err;
5493 }
5494 err = mvpp2_prs_def_flow(port);
5495 if (err) {
5496 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
5497 return err;
5498 }
5499
5500 /* Allocate the Rx/Tx queues */
5501 err = mvpp2_setup_rxqs(port);
5502 if (err) {
5503 netdev_err(port->dev, "cannot allocate Rx queues\n");
5504 return err;
5505 }
5506
5507 err = mvpp2_setup_txqs(port);
5508 if (err) {
5509 netdev_err(port->dev, "cannot allocate Tx queues\n");
5510 goto err_cleanup_rxqs;
5511 }
5512
5513 err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
5514 if (err) {
5515 netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
5516 goto err_cleanup_txqs;
5517 }
5518
5519	/* Link is down by default */
5520 netif_carrier_off(port->dev);
5521
5522 err = mvpp2_phy_connect(port);
5523 if (err < 0)
5524 goto err_free_irq;
5525
5526 /* Unmask interrupts on all CPUs */
5527 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
5528
5529 mvpp2_start_dev(port);
5530
5531 return 0;
5532
5533err_free_irq:
5534 free_irq(port->irq, port);
5535err_cleanup_txqs:
5536 mvpp2_cleanup_txqs(port);
5537err_cleanup_rxqs:
5538 mvpp2_cleanup_rxqs(port);
5539 return err;
5540}
5541
5542static int mvpp2_stop(struct net_device *dev)
5543{
5544 struct mvpp2_port *port = netdev_priv(dev);
5545
5546 mvpp2_stop_dev(port);
5547 mvpp2_phy_disconnect(port);
5548
5549 /* Mask interrupts on all CPUs */
5550 on_each_cpu(mvpp2_interrupts_mask, port, 1);
5551
5552 free_irq(port->irq, port);
5553 mvpp2_cleanup_rxqs(port);
5554 mvpp2_cleanup_txqs(port);
5555
5556 return 0;
5557}
5558
5559static void mvpp2_set_rx_mode(struct net_device *dev)
5560{
5561 struct mvpp2_port *port = netdev_priv(dev);
5562 struct mvpp2 *priv = port->priv;
5563 struct netdev_hw_addr *ha;
5564 int id = port->id;
5565 bool allmulti = dev->flags & IFF_ALLMULTI;
5566
5567 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
5568 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
5569 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
5570
5571	/* Remove all port->id's mcast entries */
5572 mvpp2_prs_mcast_del_all(priv, id);
5573
5574 if (allmulti && !netdev_mc_empty(dev)) {
5575 netdev_for_each_mc_addr(ha, dev)
5576 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
5577 }
5578}
5579
5580static int mvpp2_set_mac_address(struct net_device *dev, void *p)
5581{
5582 struct mvpp2_port *port = netdev_priv(dev);
5583 const struct sockaddr *addr = p;
5584 int err;
5585
5586 if (!is_valid_ether_addr(addr->sa_data)) {
5587 err = -EADDRNOTAVAIL;
5588 goto error;
5589 }
5590
5591 if (!netif_running(dev)) {
5592 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5593 if (!err)
5594 return 0;
5595 /* Reconfigure parser to accept the original MAC address */
5596 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5597 if (err)
5598 goto error;
5599 }
5600
5601 mvpp2_stop_dev(port);
5602
5603 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5604 if (!err)
5605 goto out_start;
5606
5607	/* Reconfigure parser to accept the original MAC address */
5608 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5609 if (err)
5610 goto error;
5611out_start:
5612 mvpp2_start_dev(port);
5613 mvpp2_egress_enable(port);
5614 mvpp2_ingress_enable(port);
5615 return 0;
5616
5617error:
5618	netdev_err(dev, "failed to change MAC address\n");
5619 return err;
5620}
5621
5622static int mvpp2_change_mtu(struct net_device *dev, int mtu)
5623{
5624 struct mvpp2_port *port = netdev_priv(dev);
5625 int err;
5626
5627 mtu = mvpp2_check_mtu_valid(dev, mtu);
5628 if (mtu < 0) {
5629 err = mtu;
5630 goto error;
5631 }
5632
5633 if (!netif_running(dev)) {
5634 err = mvpp2_bm_update_mtu(dev, mtu);
5635 if (!err) {
5636 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5637 return 0;
5638 }
5639
5640 /* Reconfigure BM to the original MTU */
5641 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5642 if (err)
5643 goto error;
5644 }
5645
5646 mvpp2_stop_dev(port);
5647
5648 err = mvpp2_bm_update_mtu(dev, mtu);
5649 if (!err) {
5650 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5651 goto out_start;
5652 }
5653
5654 /* Reconfigure BM to the original MTU */
5655 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5656 if (err)
5657 goto error;
5658
5659out_start:
5660 mvpp2_start_dev(port);
5661 mvpp2_egress_enable(port);
5662 mvpp2_ingress_enable(port);
5663
5664 return 0;
5665
5666error:
5667	netdev_err(dev, "failed to change MTU\n");
5668 return err;
5669}
5670
5671static struct rtnl_link_stats64 *
5672mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5673{
5674 struct mvpp2_port *port = netdev_priv(dev);
5675 unsigned int start;
5676 int cpu;
5677
5678 for_each_possible_cpu(cpu) {
5679 struct mvpp2_pcpu_stats *cpu_stats;
5680 u64 rx_packets;
5681 u64 rx_bytes;
5682 u64 tx_packets;
5683 u64 tx_bytes;
5684
5685 cpu_stats = per_cpu_ptr(port->stats, cpu);
5686 do {
5687 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
5688 rx_packets = cpu_stats->rx_packets;
5689 rx_bytes = cpu_stats->rx_bytes;
5690 tx_packets = cpu_stats->tx_packets;
5691 tx_bytes = cpu_stats->tx_bytes;
5692 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
5693
5694 stats->rx_packets += rx_packets;
5695 stats->rx_bytes += rx_bytes;
5696 stats->tx_packets += tx_packets;
5697 stats->tx_bytes += tx_bytes;
5698 }
5699
5700 stats->rx_errors = dev->stats.rx_errors;
5701 stats->rx_dropped = dev->stats.rx_dropped;
5702 stats->tx_dropped = dev->stats.tx_dropped;
5703
5704 return stats;
5705}
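
/* Illustrative sketch, not part of the driver: the fetch_begin/fetch_retry
 * pair above is the reader side of the kernel's u64_stats seqcount; on
 * 32-bit systems the loop re-reads the counters if a writer (the
 * u64_stats_update_begin/end calls in the Rx/Tx paths) raced with the
 * snapshot. A minimal single-counter reader, with a hypothetical name:
 */
static inline u64 example_read_rx_packets(struct mvpp2_pcpu_stats *s)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		val = s->rx_packets;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));

	return val;
}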
5706
5707/* Ethtool methods */
5708
5709/* Get settings (phy address, speed) for ethtool */
5710static int mvpp2_ethtool_get_settings(struct net_device *dev,
5711 struct ethtool_cmd *cmd)
5712{
5713 struct mvpp2_port *port = netdev_priv(dev);
5714
5715 if (!port->phy_dev)
5716 return -ENODEV;
5717 return phy_ethtool_gset(port->phy_dev, cmd);
5718}
5719
5720/* Set settings (phy address, speed) for ethtool */
5721static int mvpp2_ethtool_set_settings(struct net_device *dev,
5722 struct ethtool_cmd *cmd)
5723{
5724 struct mvpp2_port *port = netdev_priv(dev);
5725
5726 if (!port->phy_dev)
5727 return -ENODEV;
5728 return phy_ethtool_sset(port->phy_dev, cmd);
5729}
5730
5731/* Set interrupt coalescing for ethtool */
5732static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5733 struct ethtool_coalesce *c)
5734{
5735 struct mvpp2_port *port = netdev_priv(dev);
5736 int queue;
5737
5738 for (queue = 0; queue < rxq_number; queue++) {
5739 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5740
5741 rxq->time_coal = c->rx_coalesce_usecs;
5742 rxq->pkts_coal = c->rx_max_coalesced_frames;
5743 mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
5744 mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
5745 }
5746
5747 for (queue = 0; queue < txq_number; queue++) {
5748 struct mvpp2_tx_queue *txq = port->txqs[queue];
5749
5750 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5751 }
5752
5753 on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
5754 return 0;
5755}
5756
5757/* Get interrupt coalescing for ethtool */
5758static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
5759 struct ethtool_coalesce *c)
5760{
5761 struct mvpp2_port *port = netdev_priv(dev);
5762
5763 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
5764 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5765 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5766 return 0;
5767}
5768
5769static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5770 struct ethtool_drvinfo *drvinfo)
5771{
5772 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5773 sizeof(drvinfo->driver));
5774 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5775 sizeof(drvinfo->version));
5776 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
5777 sizeof(drvinfo->bus_info));
5778}
5779
5780static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
5781 struct ethtool_ringparam *ring)
5782{
5783 struct mvpp2_port *port = netdev_priv(dev);
5784
5785 ring->rx_max_pending = MVPP2_MAX_RXD;
5786 ring->tx_max_pending = MVPP2_MAX_TXD;
5787 ring->rx_pending = port->rx_ring_size;
5788 ring->tx_pending = port->tx_ring_size;
5789}
5790
5791static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
5792 struct ethtool_ringparam *ring)
5793{
5794 struct mvpp2_port *port = netdev_priv(dev);
5795 u16 prev_rx_ring_size = port->rx_ring_size;
5796 u16 prev_tx_ring_size = port->tx_ring_size;
5797 int err;
5798
5799 err = mvpp2_check_ringparam_valid(dev, ring);
5800 if (err)
5801 return err;
5802
5803 if (!netif_running(dev)) {
5804 port->rx_ring_size = ring->rx_pending;
5805 port->tx_ring_size = ring->tx_pending;
5806 return 0;
5807 }
5808
5809 /* The interface is running, so we have to force a
5810 * reallocation of the queues
5811 */
5812 mvpp2_stop_dev(port);
5813 mvpp2_cleanup_rxqs(port);
5814 mvpp2_cleanup_txqs(port);
5815
5816 port->rx_ring_size = ring->rx_pending;
5817 port->tx_ring_size = ring->tx_pending;
5818
5819 err = mvpp2_setup_rxqs(port);
5820 if (err) {
5821 /* Reallocate Rx queues with the original ring size */
5822 port->rx_ring_size = prev_rx_ring_size;
5823 ring->rx_pending = prev_rx_ring_size;
5824 err = mvpp2_setup_rxqs(port);
5825 if (err)
5826 goto err_out;
5827 }
5828 err = mvpp2_setup_txqs(port);
5829 if (err) {
5830 /* Reallocate Tx queues with the original ring size */
5831 port->tx_ring_size = prev_tx_ring_size;
5832 ring->tx_pending = prev_tx_ring_size;
5833 err = mvpp2_setup_txqs(port);
5834 if (err)
5835 goto err_clean_rxqs;
5836 }
5837
5838 mvpp2_start_dev(port);
5839 mvpp2_egress_enable(port);
5840 mvpp2_ingress_enable(port);
5841
5842 return 0;
5843
5844err_clean_rxqs:
5845 mvpp2_cleanup_rxqs(port);
5846err_out:
5847	netdev_err(dev, "failed to change ring parameters\n");
5848 return err;
5849}
5850
5851/* Device ops */
5852
5853static const struct net_device_ops mvpp2_netdev_ops = {
5854 .ndo_open = mvpp2_open,
5855 .ndo_stop = mvpp2_stop,
5856 .ndo_start_xmit = mvpp2_tx,
5857 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5858 .ndo_set_mac_address = mvpp2_set_mac_address,
5859 .ndo_change_mtu = mvpp2_change_mtu,
5860 .ndo_get_stats64 = mvpp2_get_stats64,
5861};
5862
5863static const struct ethtool_ops mvpp2_eth_tool_ops = {
5864 .get_link = ethtool_op_get_link,
5865 .get_settings = mvpp2_ethtool_get_settings,
5866 .set_settings = mvpp2_ethtool_set_settings,
5867 .set_coalesce = mvpp2_ethtool_set_coalesce,
5868 .get_coalesce = mvpp2_ethtool_get_coalesce,
5869 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5870 .get_ringparam = mvpp2_ethtool_get_ringparam,
5871 .set_ringparam = mvpp2_ethtool_set_ringparam,
5872};
5873
5874/* Driver initialization */
5875
5876static void mvpp2_port_power_up(struct mvpp2_port *port)
5877{
5878 mvpp2_port_mii_set(port);
5879 mvpp2_port_periodic_xon_disable(port);
5880 mvpp2_port_reset(port);
5881}
5882
5883/* Initialize port HW */
5884static int mvpp2_port_init(struct mvpp2_port *port)
5885{
5886 struct device *dev = port->dev->dev.parent;
5887 struct mvpp2 *priv = port->priv;
5888 struct mvpp2_txq_pcpu *txq_pcpu;
5889 int queue, cpu, err;
5890
5891 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
5892 return -EINVAL;
5893
5894 /* Disable port */
5895 mvpp2_egress_disable(port);
5896 mvpp2_port_disable(port);
5897
5898 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
5899 GFP_KERNEL);
5900 if (!port->txqs)
5901 return -ENOMEM;
5902
5903	/* Associate physical Tx queues with this port and initialize them.
5904 * The mapping is predefined.
5905 */
5906 for (queue = 0; queue < txq_number; queue++) {
5907 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5908 struct mvpp2_tx_queue *txq;
5909
5910 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
5911 if (!txq)
5912 return -ENOMEM;
5913
5914 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5915 if (!txq->pcpu) {
5916 err = -ENOMEM;
5917 goto err_free_percpu;
5918 }
5919
5920 txq->id = queue_phy_id;
5921 txq->log_id = queue;
5922 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5923 for_each_present_cpu(cpu) {
5924 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5925 txq_pcpu->cpu = cpu;
5926 }
5927
5928 port->txqs[queue] = txq;
5929 }
5930
5931 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
5932 GFP_KERNEL);
5933 if (!port->rxqs) {
5934 err = -ENOMEM;
5935 goto err_free_percpu;
5936 }
5937
5938	/* Allocate and initialize the Rx queues for this port */
5939 for (queue = 0; queue < rxq_number; queue++) {
5940 struct mvpp2_rx_queue *rxq;
5941
5942 /* Map physical Rx queue to port's logical Rx queue */
5943 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
5944 if (!rxq)
5945 goto err_free_percpu;
5946 /* Map this Rx queue to a physical queue */
5947 rxq->id = port->first_rxq + queue;
5948 rxq->port = port->id;
5949 rxq->logic_rxq = queue;
5950
5951 port->rxqs[queue] = rxq;
5952 }
5953
5954 /* Configure Rx queue group interrupt for this port */
5955 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
5956
5957 /* Create Rx descriptor rings */
5958 for (queue = 0; queue < rxq_number; queue++) {
5959 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5960
5961 rxq->size = port->rx_ring_size;
5962 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
5963 rxq->time_coal = MVPP2_RX_COAL_USEC;
5964 }
5965
5966 mvpp2_ingress_disable(port);
5967
5968 /* Port default configuration */
5969 mvpp2_defaults_set(port);
5970
5971 /* Port's classifier configuration */
5972 mvpp2_cls_oversize_rxq_set(port);
5973 mvpp2_cls_port_config(port);
5974
5975 /* Provide an initial Rx packet size */
5976 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
5977
5978	/* Initialize BM pools for software forwarding (swf) */
5979 err = mvpp2_swf_bm_pool_init(port);
5980 if (err)
5981 goto err_free_percpu;
5982
5983 return 0;
5984
5985err_free_percpu:
5986 for (queue = 0; queue < txq_number; queue++) {
5987 if (!port->txqs[queue])
5988 continue;
5989 free_percpu(port->txqs[queue]->pcpu);
5990 }
5991 return err;
5992}
5993
5994/* Ports initialization */
5995static int mvpp2_port_probe(struct platform_device *pdev,
5996 struct device_node *port_node,
5997 struct mvpp2 *priv,
5998 int *next_first_rxq)
5999{
6000 struct device_node *phy_node;
6001 struct mvpp2_port *port;
6002 struct net_device *dev;
6003 struct resource *res;
6004 const char *dt_mac_addr;
6005 const char *mac_from;
6006 char hw_mac_addr[ETH_ALEN];
6007 u32 id;
6008 int features;
6009 int phy_mode;
6010 int priv_common_regs_num = 2;
6011 int err, i;
6012
6013 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6014 rxq_number);
6015 if (!dev)
6016 return -ENOMEM;
6017
6018 phy_node = of_parse_phandle(port_node, "phy", 0);
6019 if (!phy_node) {
6020 dev_err(&pdev->dev, "missing phy\n");
6021 err = -ENODEV;
6022 goto err_free_netdev;
6023 }
6024
6025 phy_mode = of_get_phy_mode(port_node);
6026 if (phy_mode < 0) {
6027 dev_err(&pdev->dev, "incorrect phy mode\n");
6028 err = phy_mode;
6029 goto err_free_netdev;
6030 }
6031
6032 if (of_property_read_u32(port_node, "port-id", &id)) {
6033 err = -EINVAL;
6034 dev_err(&pdev->dev, "missing port-id value\n");
6035 goto err_free_netdev;
6036 }
6037
6038 dev->tx_queue_len = MVPP2_MAX_TXD;
6039 dev->watchdog_timeo = 5 * HZ;
6040 dev->netdev_ops = &mvpp2_netdev_ops;
6041 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6042
6043 port = netdev_priv(dev);
6044
6045 port->irq = irq_of_parse_and_map(port_node, 0);
6046 if (port->irq <= 0) {
6047 err = -EINVAL;
6048 goto err_free_netdev;
6049 }
6050
6051 if (of_property_read_bool(port_node, "marvell,loopback"))
6052 port->flags |= MVPP2_F_LOOPBACK;
6053
6054 port->priv = priv;
6055 port->id = id;
6056 port->first_rxq = *next_first_rxq;
6057 port->phy_node = phy_node;
6058 port->phy_interface = phy_mode;
6059
6060 res = platform_get_resource(pdev, IORESOURCE_MEM,
6061 priv_common_regs_num + id);
6062 port->base = devm_ioremap_resource(&pdev->dev, res);
6063 if (IS_ERR(port->base)) {
6064 err = PTR_ERR(port->base);
6065 dev_err(&pdev->dev, "cannot obtain port base address\n");
6066 goto err_free_irq;
6067 }
6068
6069 /* Alloc per-cpu stats */
6070 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6071 if (!port->stats) {
6072 err = -ENOMEM;
6073 goto err_free_irq;
6074 }
6075
6076 dt_mac_addr = of_get_mac_address(port_node);
6077 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6078 mac_from = "device tree";
6079 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6080 } else {
6081 mvpp2_get_mac_address(port, hw_mac_addr);
6082 if (is_valid_ether_addr(hw_mac_addr)) {
6083 mac_from = "hardware";
6084 ether_addr_copy(dev->dev_addr, hw_mac_addr);
6085 } else {
6086 mac_from = "random";
6087 eth_hw_addr_random(dev);
6088 }
6089 }
6090
6091 port->tx_ring_size = MVPP2_MAX_TXD;
6092 port->rx_ring_size = MVPP2_MAX_RXD;
6093 port->dev = dev;
6094 SET_NETDEV_DEV(dev, &pdev->dev);
6095
6096 err = mvpp2_port_init(port);
6097 if (err < 0) {
6098 dev_err(&pdev->dev, "failed to init port %d\n", id);
6099 goto err_free_stats;
6100 }
6101 mvpp2_port_power_up(port);
6102
6103 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6104 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6105 dev->features = features | NETIF_F_RXCSUM;
6106 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6107 dev->vlan_features |= features;
6108
6109 err = register_netdev(dev);
6110 if (err < 0) {
6111 dev_err(&pdev->dev, "failed to register netdev\n");
6112 goto err_free_txq_pcpu;
6113 }
6114	netdev_info(dev, "Using %s MAC address %pM\n", mac_from, dev->dev_addr);
6115
6116 /* Increment the first Rx queue number to be used by the next port */
6117 *next_first_rxq += rxq_number;
6118 priv->port_list[id] = port;
6119 return 0;
6120
6121err_free_txq_pcpu:
6122 for (i = 0; i < txq_number; i++)
6123 free_percpu(port->txqs[i]->pcpu);
6124err_free_stats:
6125 free_percpu(port->stats);
6126err_free_irq:
6127 irq_dispose_mapping(port->irq);
6128err_free_netdev:
6129 free_netdev(dev);
6130 return err;
6131}
6132
6133/* Ports removal routine */
6134static void mvpp2_port_remove(struct mvpp2_port *port)
6135{
6136 int i;
6137
6138 unregister_netdev(port->dev);
6139 free_percpu(port->stats);
6140 for (i = 0; i < txq_number; i++)
6141 free_percpu(port->txqs[i]->pcpu);
6142 irq_dispose_mapping(port->irq);
6143 free_netdev(port->dev);
6144}
6145
6146/* Initialize decoding windows */
6147static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6148 struct mvpp2 *priv)
6149{
6150 u32 win_enable;
6151 int i;
6152
6153 for (i = 0; i < 6; i++) {
6154 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6155 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6156
6157 if (i < 4)
6158 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6159 }
6160
6161 win_enable = 0;
6162
6163 for (i = 0; i < dram->num_cs; i++) {
6164 const struct mbus_dram_window *cs = dram->cs + i;
6165
6166 mvpp2_write(priv, MVPP2_WIN_BASE(i),
6167 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6168 dram->mbus_dram_target_id);
6169
6170 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6171 (cs->size - 1) & 0xffff0000);
6172
6173 win_enable |= (1 << i);
6174 }
6175
6176 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
6177}
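
/* Illustrative sketch, not part of the driver: each decoding window packs
 * the 64 KiB-aligned chip-select base, the MBus attribute and the DRAM
 * target id into a single base register, mirroring the register write
 * above. The helper name is hypothetical.
 */
static inline u32 example_win_base_val(u32 cs_base, u8 attr, u8 target_id)
{
	return (cs_base & 0xffff0000) | (attr << 8) | target_id;
}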
6178
6179/* Initialize Rx FIFOs */
6180static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6181{
6182 int port;
6183
6184 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6185 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6186 MVPP2_RX_FIFO_PORT_DATA_SIZE);
6187 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6188 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
6189 }
6190
6191 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6192 MVPP2_RX_FIFO_PORT_MIN_PKT);
6193 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6194}
6195
6196/* Initialize network controller common part HW */
6197static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6198{
6199 const struct mbus_dram_target_info *dram_target_info;
6200 int err, i;
6201
6202 /* Checks for hardware constraints */
6203 if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
6204 (txq_number > MVPP2_MAX_TXQ)) {
6205 dev_err(&pdev->dev, "invalid queue size parameter\n");
6206 return -EINVAL;
6207 }
6208
6209 /* MBUS windows configuration */
6210 dram_target_info = mv_mbus_dram_info();
6211 if (dram_target_info)
6212 mvpp2_conf_mbus_windows(dram_target_info, priv);
6213
6214 /* Allocate and initialize aggregated TXQs */
6215 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
6216 sizeof(struct mvpp2_tx_queue),
6217 GFP_KERNEL);
6218 if (!priv->aggr_txqs)
6219 return -ENOMEM;
6220
6221 for_each_present_cpu(i) {
6222 priv->aggr_txqs[i].id = i;
6223 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
6224 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
6225 MVPP2_AGGR_TXQ_SIZE, i, priv);
6226 if (err < 0)
6227 return err;
6228 }
6229
6230 /* Rx Fifo Init */
6231 mvpp2_rx_fifo_init(priv);
6232
6233 /* Reset Rx queue group interrupt configuration */
6234 for (i = 0; i < MVPP2_MAX_PORTS; i++)
6235 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
6236
6237 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6238 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
6239
6240	/* Allow cache snoop when transmitting packets */
6241 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
6242
6243 /* Buffer Manager initialization */
6244 err = mvpp2_bm_init(pdev, priv);
6245 if (err < 0)
6246 return err;
6247
6248 /* Parser default initialization */
6249 err = mvpp2_prs_default_init(pdev, priv);
6250 if (err < 0)
6251 return err;
6252
6253 /* Classifier default initialization */
6254 mvpp2_cls_init(priv);
6255
6256 return 0;
6257}
6258
6259static int mvpp2_probe(struct platform_device *pdev)
6260{
6261 struct device_node *dn = pdev->dev.of_node;
6262 struct device_node *port_node;
6263 struct mvpp2 *priv;
6264 struct resource *res;
6265 int port_count, first_rxq;
6266 int err;
6267
6268 priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
6269 if (!priv)
6270 return -ENOMEM;
6271
6272 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6273 priv->base = devm_ioremap_resource(&pdev->dev, res);
6274 if (IS_ERR(priv->base))
6275 return PTR_ERR(priv->base);
6276
6277 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6278 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
6279 if (IS_ERR(priv->lms_base))
6280 return PTR_ERR(priv->lms_base);
6281
6282 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
6283 if (IS_ERR(priv->pp_clk))
6284 return PTR_ERR(priv->pp_clk);
6285 err = clk_prepare_enable(priv->pp_clk);
6286 if (err < 0)
6287 return err;
6288
6289 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
6290 if (IS_ERR(priv->gop_clk)) {
6291 err = PTR_ERR(priv->gop_clk);
6292 goto err_pp_clk;
6293 }
6294 err = clk_prepare_enable(priv->gop_clk);
6295 if (err < 0)
6296 goto err_pp_clk;
6297
6298 /* Get system's tclk rate */
6299 priv->tclk = clk_get_rate(priv->pp_clk);
6300
6301 /* Initialize network controller */
6302 err = mvpp2_init(pdev, priv);
6303 if (err < 0) {
6304 dev_err(&pdev->dev, "failed to initialize controller\n");
6305 goto err_gop_clk;
6306 }
6307
6308 port_count = of_get_available_child_count(dn);
6309 if (port_count == 0) {
6310 dev_err(&pdev->dev, "no ports enabled\n");
6311 goto err_gop_clk;
6312 }
6313
6314 priv->port_list = devm_kcalloc(&pdev->dev, port_count,
6315 sizeof(struct mvpp2_port *),
6316 GFP_KERNEL);
6317 if (!priv->port_list) {
6318 err = -ENOMEM;
6319 goto err_gop_clk;
6320 }
6321
6322 /* Initialize ports */
6323 first_rxq = 0;
6324 for_each_available_child_of_node(dn, port_node) {
6325 err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
6326 if (err < 0)
6327 goto err_gop_clk;
6328 }
6329
6330 platform_set_drvdata(pdev, priv);
6331 return 0;
6332
6333err_gop_clk:
6334 clk_disable_unprepare(priv->gop_clk);
6335err_pp_clk:
6336 clk_disable_unprepare(priv->pp_clk);
6337 return err;
6338}
6339
6340static int mvpp2_remove(struct platform_device *pdev)
6341{
6342 struct mvpp2 *priv = platform_get_drvdata(pdev);
6343 struct device_node *dn = pdev->dev.of_node;
6344 struct device_node *port_node;
6345 int i = 0;
6346
6347 for_each_available_child_of_node(dn, port_node) {
6348 if (priv->port_list[i])
6349 mvpp2_port_remove(priv->port_list[i]);
6350 i++;
6351 }
6352
6353 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
6354 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
6355
6356 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
6357 }
6358
6359 for_each_present_cpu(i) {
6360 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
6361
6362 dma_free_coherent(&pdev->dev,
6363 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
6364 aggr_txq->descs,
6365 aggr_txq->descs_phys);
6366 }
6367
6368 clk_disable_unprepare(priv->pp_clk);
6369 clk_disable_unprepare(priv->gop_clk);
6370
6371 return 0;
6372}
6373
6374static const struct of_device_id mvpp2_match[] = {
6375 { .compatible = "marvell,armada-375-pp2" },
6376 { }
6377};
6378MODULE_DEVICE_TABLE(of, mvpp2_match);
6379
6380static struct platform_driver mvpp2_driver = {
6381 .probe = mvpp2_probe,
6382 .remove = mvpp2_remove,
6383 .driver = {
6384 .name = MVPP2_DRIVER_NAME,
6385 .of_match_table = mvpp2_match,
6386 },
6387};
6388
6389module_platform_driver(mvpp2_driver);
6390
6391MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
6392MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
6393MODULE_LICENSE("GPL v2");