authorDavid S. Miller <davem@davemloft.net>2014-03-27 13:07:45 -0400
committerDavid S. Miller <davem@davemloft.net>2014-03-27 13:07:45 -0400
commit1dbe136938df6633042b5674abdd2e08542168f3 (patch)
treef8610e2496ccb9cc43dfde9f364db389a1a53475
parentfcb144b5df260d5005165589f958594facf1d6ae (diff)
parent66890ed642a8ed103e958c4f53ff773d2220effc (diff)
Merge branch 'sxgbe'
Byungho An says:

====================
This is the 14th posting of the Samsung SXGBE driver.

Changes since v1:
- changed the name of the driver to SXGbE as per Ben's comment
- squashed Joe's neatening of many items in the original patches

Changes since v2:
- updated and split the binding document as per Mark's comment
- cleaned up code as per Joe's comment
- removed unused fields and cleaned up code as per Francois's comment
- removed module parameters as per Dave's comment
- moved the driver directory to samsung/sxgbe/

Changes since v3:
- fixed a missing blank line after declarations as per Dave's comment
- cleaned up code as per Joe's comment
- removed references to net_device.{irq, base_addr} as per Francois's comment

Changes since v4:
- updated the binding document and DT-related functions as per Mark's comment

Changes since v5:
- updated the binding document and DT-related functions as per Florian's comment
- fixed a typo and shortened code as per Joe's comment

Changes since v6:
- updated TSO-related functions as per Rayagond's comment
- updated the binding document as per Mark's comment
- removed the WoL patch from this patch set

Changes since v7:
- updated TSO-related functions as per Rayagond's comment

Changes since v8:
- removed select and depends statements from the vendor sub-section as per Dave's comment

Changes since v9:
- removed adv-add-map, force-sf-dma-mode and force-thresh-dma-mode from the binding document as per Mark's comment

Changes since v10:
- cleaned up code as per Francois's comment

Changes since v11:
- cleaned up mdio_read/write code as per Francois's comment
- changed the irq acquisition error path as per Francois's comment
- updated mdio and platform related code as per Tomasz's comment
- cleaned up DMA-related code as per Vince's comment

Changes since v12:
- fixed a typo

Changes since v13:
- cleaned up the error path code for irqs as per Francois's comment
- removed unsupported functions for ethtool as per Ben's comment
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--Documentation/devicetree/bindings/net/samsung-sxgbe.txt52
-rw-r--r--MAINTAINERS9
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/samsung/Kconfig16
-rw-r--r--drivers/net/ethernet/samsung/Makefile5
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/Kconfig9
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/Makefile4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h535
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c262
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c515
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h298
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c382
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h50
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c524
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c2311
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c251
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c254
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h104
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c259
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h488
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c91
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h38
-rw-r--r--include/linux/sxgbe_platform.h54
24 files changed, 6513 insertions, 0 deletions
diff --git a/Documentation/devicetree/bindings/net/samsung-sxgbe.txt b/Documentation/devicetree/bindings/net/samsung-sxgbe.txt
new file mode 100644
index 000000000000..989f6c95cfd5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/samsung-sxgbe.txt
@@ -0,0 +1,52 @@
1* Samsung 10G Ethernet driver (SXGBE)
2
3Required properties:
4- compatible: Should be "samsung,sxgbe-v2.0a"
5- reg: Address and length of the register set for the device
6- interrupt-parent: Should be the phandle for the interrupt controller
7 that services interrupts for this device
8- interrupts: Should contain the SXGBE interrupts
9	 These interrupts are ordered with the fixed interrupt first, followed
10	 by the variable interrupts: transmit DMA, receive DMA and LPI.
11	 index 0 - the fixed common interrupt of the SXGBE; it is always
12	 available.
13	 index 1 to 25 - 8 variable transmit interrupts, 16 variable receive
14	 interrupts and 1 optional LPI interrupt.
15- phy-mode: String, operation mode of the PHY interface.
16 Supported values are: "sgmii", "xgmii".
17- samsung,pbl: Integer, Programmable Burst Length.
18 Supported values are 1, 2, 4, 8, 16, or 32.
19- samsung,burst-map: Integer, bitmap of the bursts supported by the sxgbe.
20	 This integer selects the allowable DMA bursts when fixed burst is used.
21	 Allowable range is 0x01-0x3F. When this field is set, fixed burst is enabled.
22	 When a fixed length is needed for burst mode, it can be set within the
23	 allowable range.
24
25Optional properties:
26- mac-address: 6 bytes, mac address
27- max-frame-size: Maximum Transfer Unit (IEEE defined MTU), rather
28 than the maximum frame size.
29
30Example:
31
32 aliases {
33 ethernet0 = <&sxgbe0>;
34 };
35
36 sxgbe0: ethernet@1a040000 {
37 compatible = "samsung,sxgbe-v2.0a";
38 reg = <0 0x1a040000 0 0x10000>;
39 interrupt-parent = <&gic>;
40 interrupts = <0 209 4>, <0 185 4>, <0 186 4>, <0 187 4>,
41 <0 188 4>, <0 189 4>, <0 190 4>, <0 191 4>,
42 <0 192 4>, <0 193 4>, <0 194 4>, <0 195 4>,
43 <0 196 4>, <0 197 4>, <0 198 4>, <0 199 4>,
44 <0 200 4>, <0 201 4>, <0 202 4>, <0 203 4>,
45 <0 204 4>, <0 205 4>, <0 206 4>, <0 207 4>,
46 <0 208 4>, <0 210 4>;
47		samsung,pbl = <0x08>;
48		samsung,burst-map = <0x20>;
49 mac-address = [ 00 11 22 33 44 55 ]; /* Filled in by U-Boot */
50 max-frame-size = <9000>;
51 phy-mode = "xgmii";
52 };
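For reference, here is a minimal sketch of how a platform driver could consume this binding, using the standard OF helpers (of_get_phy_mode(), of_property_read_u32()); the holder structure and function names are hypothetical, and the actual sxgbe probe code may differ:

	#include <linux/of.h>
	#include <linux/of_net.h>

	/* Hypothetical holder for the parsed properties */
	struct example_sxgbe_dt_cfg {
		int phy_mode;	/* "sgmii" or "xgmii" */
		u32 pbl;	/* samsung,pbl */
		u32 burst_map;	/* samsung,burst-map */
	};

	static int example_parse_sxgbe_dt(struct device_node *np,
					  struct example_sxgbe_dt_cfg *cfg)
	{
		cfg->phy_mode = of_get_phy_mode(np);
		if (cfg->phy_mode < 0)
			return cfg->phy_mode;

		/* Both properties are required by this binding */
		if (of_property_read_u32(np, "samsung,pbl", &cfg->pbl))
			return -EINVAL;
		if (of_property_read_u32(np, "samsung,burst-map",
					 &cfg->burst_map))
			return -EINVAL;

		return 0;
	}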
diff --git a/MAINTAINERS b/MAINTAINERS
index 349a993fbda6..f779c91122bc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7550,6 +7550,15 @@ S: Supported
7550L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
7551F:	drivers/clk/samsung/
7552
7553SAMSUNG SXGBE DRIVERS
7554M: Byungho An <bh74.an@samsung.com>
7555M: Girish K S <ks.giri@samsung.com>
7556M: Siva Reddy Kallam <siva.kallam@samsung.com>
7557M: Vipul Pandya <vipul.pandya@samsung.com>
7558S: Supported
7559L: netdev@vger.kernel.org
7560F: drivers/net/ethernet/samsung/sxgbe/
7561
7562SERIAL DRIVERS
7563M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
7564L:	linux-serial@vger.kernel.org
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 39484b534f5e..39b26fe28d10 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -150,6 +150,7 @@ config S6GMAC
150	  To compile this driver as a module, choose M here. The module
151	  will be called s6gmac.
152
153source "drivers/net/ethernet/samsung/Kconfig"
154source "drivers/net/ethernet/seeq/Kconfig"
155source "drivers/net/ethernet/silan/Kconfig"
156source "drivers/net/ethernet/sis/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index adf61af507f7..545d0b3b9cb4 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
61obj-$(CONFIG_SH_ETH) += renesas/
62obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
63obj-$(CONFIG_S6GMAC) += s6gmac.o
64obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
65obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
66obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
67obj-$(CONFIG_NET_VENDOR_SIS) += sis/
diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig
new file mode 100644
index 000000000000..7902341f2623
--- /dev/null
+++ b/drivers/net/ethernet/samsung/Kconfig
@@ -0,0 +1,16 @@
1#
2# Samsung Ethernet device configuration
3#
4
5config NET_VENDOR_SAMSUNG
6 bool "Samsung Ethernet device"
7 default y
8 ---help---
9 This is the driver for the SXGBE 10G Ethernet IP block found on Samsung
10 platforms.
11
12if NET_VENDOR_SAMSUNG
13
14source "drivers/net/ethernet/samsung/sxgbe/Kconfig"
15
16endif # NET_VENDOR_SAMSUNG
diff --git a/drivers/net/ethernet/samsung/Makefile b/drivers/net/ethernet/samsung/Makefile
new file mode 100644
index 000000000000..1773c29b8d76
--- /dev/null
+++ b/drivers/net/ethernet/samsung/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the Samsung Ethernet device drivers.
3#
4
5obj-$(CONFIG_SXGBE_ETH) += sxgbe/
diff --git a/drivers/net/ethernet/samsung/sxgbe/Kconfig b/drivers/net/ethernet/samsung/sxgbe/Kconfig
new file mode 100644
index 000000000000..d79288c51d0a
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/Kconfig
@@ -0,0 +1,9 @@
1config SXGBE_ETH
2 tristate "Samsung 10G/2.5G/1G SXGBE Ethernet driver"
3 depends on HAS_IOMEM && HAS_DMA
4 select PHYLIB
5 select CRC32
6 select PTP_1588_CLOCK
7 ---help---
8 This is the driver for the SXGBE 10G Ethernet IP block found on Samsung
9 platforms.
diff --git a/drivers/net/ethernet/samsung/sxgbe/Makefile b/drivers/net/ethernet/samsung/sxgbe/Makefile
new file mode 100644
index 000000000000..dcc80b9d4370
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o
2samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \
3 sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o sxgbe_mdio.o \
4 sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
new file mode 100644
index 000000000000..6203c7d8550f
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -0,0 +1,535 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef __SXGBE_COMMON_H__
14#define __SXGBE_COMMON_H__
15
16/* forward references */
17struct sxgbe_desc_ops;
18struct sxgbe_dma_ops;
19struct sxgbe_mtl_ops;
20
21#define SXGBE_RESOURCE_NAME "sam_sxgbeeth"
22#define DRV_MODULE_VERSION "November_2013"
23
24/* MAX HW feature words */
25#define SXGBE_HW_WORDS 3
26
27#define SXGBE_RX_COE_NONE 0
28
29/* CSR Frequency Access Defines*/
30#define SXGBE_CSR_F_150M 150000000
31#define SXGBE_CSR_F_250M 250000000
32#define SXGBE_CSR_F_300M 300000000
33#define SXGBE_CSR_F_350M 350000000
34#define SXGBE_CSR_F_400M 400000000
35#define SXGBE_CSR_F_500M 500000000
36
37/* pause time */
38#define SXGBE_PAUSE_TIME 0x200
39
40/* tx queues */
41#define SXGBE_TX_QUEUES 8
42#define SXGBE_RX_QUEUES 16
43
44/* Calculated based on how much time it takes to fill 256KB of Rx memory
45 * at 10Gb speed with a 156MHz clock rate, set slightly below the
46 * actual value.
47 */
48#define SXGBE_MAX_DMA_RIWT 0x70
49#define SXGBE_MIN_DMA_RIWT 0x01
50
51/* Tx coalesce parameters */
52#define SXGBE_COAL_TX_TIMER 40000
53#define SXGBE_MAX_COAL_TX_TICK 100000
54#define SXGBE_TX_MAX_FRAMES 512
55#define SXGBE_TX_FRAMES 128
56
57/* SXGBE TX FIFO is 8K, Rx FIFO is 16K */
58#define BUF_SIZE_16KiB 16384
59#define BUF_SIZE_8KiB 8192
60#define BUF_SIZE_4KiB 4096
61#define BUF_SIZE_2KiB 2048
62
63#define SXGBE_DEFAULT_LIT_LS 0x3E8
64#define SXGBE_DEFAULT_TWT_LS 0x0
65
66/* Flow Control defines */
67#define SXGBE_FLOW_OFF 0
68#define SXGBE_FLOW_RX 1
69#define SXGBE_FLOW_TX 2
70#define SXGBE_FLOW_AUTO (SXGBE_FLOW_TX | SXGBE_FLOW_RX)
71
72#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
73
74/* errors */
75#define RX_GMII_ERR 0x01
76#define RX_WATCHDOG_ERR 0x02
77#define RX_CRC_ERR 0x03
78#define RX_GAINT_ERR 0x04
79#define RX_IP_HDR_ERR 0x05
80#define RX_PAYLOAD_ERR 0x06
81#define RX_OVERFLOW_ERR 0x07
82
83/* pkt type */
84#define RX_LEN_PKT 0x00
85#define RX_MACCTL_PKT 0x01
86#define RX_DCBCTL_PKT 0x02
87#define RX_ARP_PKT 0x03
88#define RX_OAM_PKT 0x04
89#define RX_UNTAG_PKT 0x05
90#define RX_OTHER_PKT 0x07
91#define RX_SVLAN_PKT 0x08
92#define RX_CVLAN_PKT 0x09
93#define RX_DVLAN_OCVLAN_ICVLAN_PKT 0x0A
94#define RX_DVLAN_OSVLAN_ISVLAN_PKT 0x0B
95#define RX_DVLAN_OSVLAN_ICVLAN_PKT 0x0C
96#define RX_DVLAN_OCVLAN_ISVLAN_PKT 0x0D
97
98#define RX_NOT_IP_PKT 0x00
99#define RX_IPV4_TCP_PKT 0x01
100#define RX_IPV4_UDP_PKT 0x02
101#define RX_IPV4_ICMP_PKT 0x03
102#define RX_IPV4_UNKNOWN_PKT 0x07
103#define RX_IPV6_TCP_PKT 0x09
104#define RX_IPV6_UDP_PKT 0x0A
105#define RX_IPV6_ICMP_PKT 0x0B
106#define RX_IPV6_UNKNOWN_PKT 0x0F
107
108#define RX_NO_PTP 0x00
109#define RX_PTP_SYNC 0x01
110#define RX_PTP_FOLLOW_UP 0x02
111#define RX_PTP_DELAY_REQ 0x03
112#define RX_PTP_DELAY_RESP 0x04
113#define RX_PTP_PDELAY_REQ 0x05
114#define RX_PTP_PDELAY_RESP 0x06
115#define RX_PTP_PDELAY_FOLLOW_UP 0x07
116#define RX_PTP_ANNOUNCE 0x08
117#define RX_PTP_MGMT 0x09
118#define RX_PTP_SIGNAL 0x0A
119#define RX_PTP_RESV_MSG 0x0F
120
121/* EEE-LPI mode flags*/
122#define TX_ENTRY_LPI_MODE 0x10
123#define TX_EXIT_LPI_MODE 0x20
124#define RX_ENTRY_LPI_MODE 0x40
125#define RX_EXIT_LPI_MODE 0x80
126
127/* EEE-LPI Interrupt status flag */
128#define LPI_INT_STATUS BIT(5)
129
130/* EEE-LPI Default timer values */
131#define LPI_LINK_STATUS_TIMER 0x3E8
132#define LPI_MAC_WAIT_TIMER 0x00
133
134/* EEE-LPI Control and status definitions */
135#define LPI_CTRL_STATUS_TXA BIT(19)
136#define LPI_CTRL_STATUS_PLSDIS BIT(18)
137#define LPI_CTRL_STATUS_PLS BIT(17)
138#define LPI_CTRL_STATUS_LPIEN BIT(16)
139#define LPI_CTRL_STATUS_TXRSTP BIT(11)
140#define LPI_CTRL_STATUS_RXRSTP BIT(10)
141#define LPI_CTRL_STATUS_RLPIST BIT(9)
142#define LPI_CTRL_STATUS_TLPIST BIT(8)
143#define LPI_CTRL_STATUS_RLPIEX BIT(3)
144#define LPI_CTRL_STATUS_RLPIEN BIT(2)
145#define LPI_CTRL_STATUS_TLPIEX BIT(1)
146#define LPI_CTRL_STATUS_TLPIEN BIT(0)
147
148enum dma_irq_status {
149 tx_hard_error = BIT(0),
150 tx_bump_tc = BIT(1),
151 handle_tx = BIT(2),
152 rx_hard_error = BIT(3),
153 rx_bump_tc = BIT(4),
154 handle_rx = BIT(5),
155};
156
157#define NETIF_F_HW_VLAN_ALL (NETIF_F_HW_VLAN_CTAG_RX | \
158 NETIF_F_HW_VLAN_STAG_RX | \
159 NETIF_F_HW_VLAN_CTAG_TX | \
160 NETIF_F_HW_VLAN_STAG_TX | \
161 NETIF_F_HW_VLAN_CTAG_FILTER | \
162 NETIF_F_HW_VLAN_STAG_FILTER)
163
164/* MMC control defines */
165#define SXGBE_MMC_CTRL_CNT_FRZ 0x00000008
166
167/* SXGBE HW ADDR regs */
168#define SXGBE_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
169 (reg * 8))
170#define SXGBE_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
171 (reg * 8))
172#define SXGBE_MAX_PERFECT_ADDRESSES 32 /* Maximum unicast perfect filtering */
173#define SXGBE_FRAME_FILTER 0x00000004 /* Frame Filter */
174
175/* SXGBE Frame Filter defines */
176#define SXGBE_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
177#define SXGBE_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
178#define SXGBE_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
179#define SXGBE_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
180#define SXGBE_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
181#define SXGBE_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
182#define SXGBE_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
183#define SXGBE_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
184#define SXGBE_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
185#define SXGBE_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
186
187#define SXGBE_HASH_TABLE_SIZE 64
188#define SXGBE_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
189#define SXGBE_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
190
191#define SXGBE_HI_REG_AE 0x80000000
192
193/* Minimum and maximum MTU */
194#define MIN_MTU 68
195#define MAX_MTU 9000
196
197#define SXGBE_FOR_EACH_QUEUE(max_queues, queue_num) \
198 for (queue_num = 0; queue_num < max_queues; queue_num++)
199
200#define DRV_VERSION "1.0.0"
201
202#define SXGBE_MAX_RX_CHANNELS 16
203#define SXGBE_MAX_TX_CHANNELS 16
204
205#define START_MAC_REG_OFFSET 0x0000
206#define MAX_MAC_REG_OFFSET 0x0DFC
207#define START_MTL_REG_OFFSET 0x1000
208#define MAX_MTL_REG_OFFSET 0x18FC
209#define START_DMA_REG_OFFSET 0x3000
210#define MAX_DMA_REG_OFFSET 0x38FC
211
212#define REG_SPACE_SIZE 0x2000
213
214/* sxgbe statistics counters */
215struct sxgbe_extra_stats {
216 /* TX/RX IRQ events */
217 unsigned long tx_underflow_irq;
218 unsigned long tx_process_stopped_irq;
219 unsigned long tx_ctxt_desc_err;
220 unsigned long tx_threshold;
221 unsigned long rx_threshold;
222 unsigned long tx_pkt_n;
223 unsigned long rx_pkt_n;
224 unsigned long normal_irq_n;
225 unsigned long tx_normal_irq_n;
226 unsigned long rx_normal_irq_n;
227 unsigned long napi_poll;
228 unsigned long tx_clean;
229 unsigned long tx_reset_ic_bit;
230 unsigned long rx_process_stopped_irq;
231 unsigned long rx_underflow_irq;
232
233 /* Bus access errors */
234 unsigned long fatal_bus_error_irq;
235 unsigned long tx_read_transfer_err;
236 unsigned long tx_write_transfer_err;
237 unsigned long tx_desc_access_err;
238 unsigned long tx_buffer_access_err;
239 unsigned long tx_data_transfer_err;
240 unsigned long rx_read_transfer_err;
241 unsigned long rx_write_transfer_err;
242 unsigned long rx_desc_access_err;
243 unsigned long rx_buffer_access_err;
244 unsigned long rx_data_transfer_err;
245
246 /* EEE-LPI stats */
247 unsigned long tx_lpi_entry_n;
248 unsigned long tx_lpi_exit_n;
249 unsigned long rx_lpi_entry_n;
250 unsigned long rx_lpi_exit_n;
251 unsigned long eee_wakeup_error_n;
252
253 /* RX specific */
254 /* L2 error */
255 unsigned long rx_code_gmii_err;
256 unsigned long rx_watchdog_err;
257 unsigned long rx_crc_err;
258 unsigned long rx_gaint_pkt_err;
259 unsigned long ip_hdr_err;
260 unsigned long ip_payload_err;
261 unsigned long overflow_error;
262
263 /* L2 Pkt type */
264 unsigned long len_pkt;
265 unsigned long mac_ctl_pkt;
266 unsigned long dcb_ctl_pkt;
267 unsigned long arp_pkt;
268 unsigned long oam_pkt;
269 unsigned long untag_okt;
270 unsigned long other_pkt;
271 unsigned long svlan_tag_pkt;
272 unsigned long cvlan_tag_pkt;
273 unsigned long dvlan_ocvlan_icvlan_pkt;
274 unsigned long dvlan_osvlan_isvlan_pkt;
275 unsigned long dvlan_osvlan_icvlan_pkt;
276 unsigned long dvan_ocvlan_icvlan_pkt;
277
278 /* L3/L4 Pkt type */
279 unsigned long not_ip_pkt;
280 unsigned long ip4_tcp_pkt;
281 unsigned long ip4_udp_pkt;
282 unsigned long ip4_icmp_pkt;
283 unsigned long ip4_unknown_pkt;
284 unsigned long ip6_tcp_pkt;
285 unsigned long ip6_udp_pkt;
286 unsigned long ip6_icmp_pkt;
287 unsigned long ip6_unknown_pkt;
288
289 /* Filter specific */
290 unsigned long vlan_filter_match;
291 unsigned long sa_filter_fail;
292 unsigned long da_filter_fail;
293 unsigned long hash_filter_pass;
294 unsigned long l3_filter_match;
295 unsigned long l4_filter_match;
296
297 /* RX context specific */
298 unsigned long timestamp_dropped;
299 unsigned long rx_msg_type_no_ptp;
300 unsigned long rx_ptp_type_sync;
301 unsigned long rx_ptp_type_follow_up;
302 unsigned long rx_ptp_type_delay_req;
303 unsigned long rx_ptp_type_delay_resp;
304 unsigned long rx_ptp_type_pdelay_req;
305 unsigned long rx_ptp_type_pdelay_resp;
306 unsigned long rx_ptp_type_pdelay_follow_up;
307 unsigned long rx_ptp_announce;
308 unsigned long rx_ptp_mgmt;
309 unsigned long rx_ptp_signal;
310 unsigned long rx_ptp_resv_msg_type;
311};
312
313struct mac_link {
314 int port;
315 int duplex;
316 int speed;
317};
318
319struct mii_regs {
320 unsigned int addr; /* MII Address */
321 unsigned int data; /* MII Data */
322};
323
324struct sxgbe_core_ops {
325 /* MAC core initialization */
326 void (*core_init)(void __iomem *ioaddr);
327 /* Dump MAC registers */
328 void (*dump_regs)(void __iomem *ioaddr);
329 /* Handle extra events on specific interrupts hw dependent */
330 int (*host_irq_status)(void __iomem *ioaddr,
331 struct sxgbe_extra_stats *x);
332 /* Set power management mode (e.g. magic frame) */
333 void (*pmt)(void __iomem *ioaddr, unsigned long mode);
334 /* Set/Get Unicast MAC addresses */
335 void (*set_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
336 unsigned int reg_n);
337 void (*get_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
338 unsigned int reg_n);
339 void (*enable_rx)(void __iomem *ioaddr, bool enable);
340 void (*enable_tx)(void __iomem *ioaddr, bool enable);
341
342 /* controller version specific operations */
343 int (*get_controller_version)(void __iomem *ioaddr);
344
345 /* If supported then get the optional core features */
346 unsigned int (*get_hw_feature)(void __iomem *ioaddr,
347 unsigned char feature_index);
348 /* adjust SXGBE speed */
349 void (*set_speed)(void __iomem *ioaddr, unsigned char speed);
350
351 /* EEE-LPI specific operations */
352 void (*set_eee_mode)(void __iomem *ioaddr);
353 void (*reset_eee_mode)(void __iomem *ioaddr);
354 void (*set_eee_timer)(void __iomem *ioaddr, const int ls,
355 const int tw);
356 void (*set_eee_pls)(void __iomem *ioaddr, const int link);
357
358 /* Enable disable checksum offload operations */
359 void (*enable_rx_csum)(void __iomem *ioaddr);
360 void (*disable_rx_csum)(void __iomem *ioaddr);
361};
362
363const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
364
365struct sxgbe_ops {
366 const struct sxgbe_core_ops *mac;
367 const struct sxgbe_desc_ops *desc;
368 const struct sxgbe_dma_ops *dma;
369 const struct sxgbe_mtl_ops *mtl;
370 struct mii_regs mii; /* MII register Addresses */
371 struct mac_link link;
372 unsigned int ctrl_uid;
373 unsigned int ctrl_id;
374};
375
376/* SXGBE private data structures */
377struct sxgbe_tx_queue {
378 unsigned int irq_no;
379 struct sxgbe_priv_data *priv_ptr;
380 struct sxgbe_tx_norm_desc *dma_tx;
381 dma_addr_t dma_tx_phy;
382 dma_addr_t *tx_skbuff_dma;
383 struct sk_buff **tx_skbuff;
384 struct timer_list txtimer;
385 spinlock_t tx_lock; /* lock for tx queues */
386 unsigned int cur_tx;
387 unsigned int dirty_tx;
388 u32 tx_count_frames;
389 u32 tx_coal_frames;
390 u32 tx_coal_timer;
391 int hwts_tx_en;
392 u16 prev_mss;
393 u8 queue_no;
394};
395
396struct sxgbe_rx_queue {
397 struct sxgbe_priv_data *priv_ptr;
398 struct sxgbe_rx_norm_desc *dma_rx;
399 struct sk_buff **rx_skbuff;
400 unsigned int cur_rx;
401 unsigned int dirty_rx;
402 unsigned int irq_no;
403 u32 rx_riwt;
404 dma_addr_t *rx_skbuff_dma;
405 dma_addr_t dma_rx_phy;
406 u8 queue_no;
407};
408
409/* SXGBE HW capabilities */
410struct sxgbe_hw_features {
411 /****** CAP [0] *******/
412 unsigned int pmt_remote_wake_up;
413 unsigned int pmt_magic_frame;
414 /* IEEE 1588-2008 */
415 unsigned int atime_stamp;
416
417 unsigned int eee;
418
419 unsigned int tx_csum_offload;
420 unsigned int rx_csum_offload;
421 unsigned int multi_macaddr;
422 unsigned int tstamp_srcselect;
423 unsigned int sa_vlan_insert;
424
425 /****** CAP [1] *******/
426 unsigned int rxfifo_size;
427 unsigned int txfifo_size;
428 unsigned int atstmap_hword;
429 unsigned int dcb_enable;
430 unsigned int splithead_enable;
431 unsigned int tcpseg_offload;
432 unsigned int debug_mem;
433 unsigned int rss_enable;
434 unsigned int hash_tsize;
435 unsigned int l3l4_filer_size;
436
437 /* This value is in bytes and
438 * as mentioned in HW features
439 * of SXGBE data book
440 */
441 unsigned int rx_mtl_qsize;
442 unsigned int tx_mtl_qsize;
443
444 /****** CAP [2] *******/
445 /* TX and RX number of channels */
446 unsigned int rx_mtl_queues;
447 unsigned int tx_mtl_queues;
448 unsigned int rx_dma_channels;
449 unsigned int tx_dma_channels;
450 unsigned int pps_output_count;
451 unsigned int aux_input_count;
452};
453
454struct sxgbe_priv_data {
455	/* DMA descriptors */
456 struct sxgbe_tx_queue *txq[SXGBE_TX_QUEUES];
457 struct sxgbe_rx_queue *rxq[SXGBE_RX_QUEUES];
458 u8 cur_rx_qnum;
459
460 unsigned int dma_tx_size;
461 unsigned int dma_rx_size;
462 unsigned int dma_buf_sz;
463 u32 rx_riwt;
464
465 struct napi_struct napi;
466
467 void __iomem *ioaddr;
468 struct net_device *dev;
469 struct device *device;
470 struct sxgbe_ops *hw; /* sxgbe specific ops */
471 int no_csum_insertion;
472 int irq;
473 int rxcsum_insertion;
474	spinlock_t stats_lock; /* lock for tx/rx statistics */
475
476 struct phy_device *phydev;
477 int oldlink;
478 int speed;
479 int oldduplex;
480 struct mii_bus *mii;
481 int mii_irq[PHY_MAX_ADDR];
482 u8 rx_pause;
483 u8 tx_pause;
484
485 struct sxgbe_extra_stats xstats;
486 struct sxgbe_plat_data *plat;
487 struct sxgbe_hw_features hw_cap;
488
489 u32 msg_enable;
490
491 struct clk *sxgbe_clk;
492 int clk_csr;
493 unsigned int mode;
494 unsigned int default_addend;
495
496 /* advanced time stamp support */
497 u32 adv_ts;
498 int use_riwt;
499 struct ptp_clock *ptp_clock;
500
501 /* tc control */
502 int tx_tc;
503 int rx_tc;
504 /* EEE-LPI specific members */
505 struct timer_list eee_ctrl_timer;
506 bool tx_path_in_lpi_mode;
507 int lpi_irq;
508 int eee_enabled;
509 int eee_active;
510 int tx_lpi_timer;
511};
512
513/* Function prototypes */
514struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
515 struct sxgbe_plat_data *plat_dat,
516 void __iomem *addr);
517int sxgbe_drv_remove(struct net_device *ndev);
518void sxgbe_set_ethtool_ops(struct net_device *netdev);
519int sxgbe_mdio_unregister(struct net_device *ndev);
520int sxgbe_mdio_register(struct net_device *ndev);
521int sxgbe_register_platform(void);
522void sxgbe_unregister_platform(void);
523
524#ifdef CONFIG_PM
525int sxgbe_suspend(struct net_device *ndev);
526int sxgbe_resume(struct net_device *ndev);
527int sxgbe_freeze(struct net_device *ndev);
528int sxgbe_restore(struct net_device *ndev);
529#endif /* CONFIG_PM */
530
531const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void);
532
533void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv);
534bool sxgbe_eee_init(struct sxgbe_priv_data * const priv);
535#endif /* __SXGBE_COMMON_H__ */
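The header above only declares the per-block operation tables; how they compose is easiest to see in a short sketch. The following is illustrative only — the accessors are the ones declared in this header, but the hookup and call order in the real probe path in sxgbe_main.c may differ:

	/* Illustrative: wire the per-block ops into struct sxgbe_ops,
	 * then drive the hardware through the table.
	 */
	static void example_hook_ops(struct sxgbe_priv_data *priv)
	{
		priv->hw->mac = sxgbe_get_core_ops();
		priv->hw->mtl = sxgbe_get_mtl_ops();

		priv->hw->mac->core_init(priv->ioaddr);
		priv->hw->mac->enable_tx(priv->ioaddr, true);
		priv->hw->mac->enable_rx(priv->ioaddr, true);
	}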
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
new file mode 100644
index 000000000000..66d4a74a137c
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
@@ -0,0 +1,262 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/export.h>
16#include <linux/io.h>
17#include <linux/netdevice.h>
18#include <linux/phy.h>
19
20#include "sxgbe_common.h"
21#include "sxgbe_reg.h"
22
23/* MAC core initialization */
24static void sxgbe_core_init(void __iomem *ioaddr)
25{
26 u32 regval;
27
28 /* TX configuration */
29 regval = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
30 /* Other configurable parameters IFP, IPG, ISR, ISM
31 * needs to be set if needed
32 */
33 regval |= SXGBE_TX_JABBER_DISABLE;
34 writel(regval, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
35
36 /* RX configuration */
37 regval = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
38 /* Other configurable parameters CST, SPEN, USP, GPSLCE
39 * WD, LM, S2KP, HDSMS, GPSL, ELEN, ARPEN needs to be
40 * set if needed
41 */
42 regval |= SXGBE_RX_JUMBPKT_ENABLE | SXGBE_RX_ACS_ENABLE;
43 writel(regval, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
44}
45
46/* Dump MAC registers */
47static void sxgbe_core_dump_regs(void __iomem *ioaddr)
48{
49}
50
51static int sxgbe_get_lpi_status(void __iomem *ioaddr, const u32 irq_status)
52{
53 int status = 0;
54 int lpi_status;
55
56 /* Reading this register shall clear all the LPI status bits */
57 lpi_status = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
58
59 if (lpi_status & LPI_CTRL_STATUS_TLPIEN)
60 status |= TX_ENTRY_LPI_MODE;
61 if (lpi_status & LPI_CTRL_STATUS_TLPIEX)
62 status |= TX_EXIT_LPI_MODE;
63 if (lpi_status & LPI_CTRL_STATUS_RLPIEN)
64 status |= RX_ENTRY_LPI_MODE;
65 if (lpi_status & LPI_CTRL_STATUS_RLPIEX)
66 status |= RX_EXIT_LPI_MODE;
67
68 return status;
69}
70
71/* Handle extra events on specific interrupts hw dependent */
72static int sxgbe_core_host_irq_status(void __iomem *ioaddr,
73 struct sxgbe_extra_stats *x)
74{
75 int irq_status, status = 0;
76
77 irq_status = readl(ioaddr + SXGBE_CORE_INT_STATUS_REG);
78
79 if (unlikely(irq_status & LPI_INT_STATUS))
80 status |= sxgbe_get_lpi_status(ioaddr, irq_status);
81
82 return status;
83}
84
85/* Set power management mode (e.g. magic frame) */
86static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode)
87{
88}
89
90/* Set/Get Unicast MAC addresses */
91static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
92 unsigned int reg_n)
93{
94 u32 high_word, low_word;
95
96	high_word = (addr[5] << 8) | (addr[4]);
97	low_word = ((addr[3] << 24) | (addr[2] << 16) |
98		    (addr[1] << 8) | (addr[0]));
99 writel(high_word, ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
100 writel(low_word, ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));
101}
102
103static void sxgbe_core_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
104 unsigned int reg_n)
105{
106 u32 high_word, low_word;
107
108 high_word = readl(ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
109 low_word = readl(ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));
110
111 /* extract and assign address */
112 addr[5] = (high_word & 0x0000FF00) >> 8;
113 addr[4] = (high_word & 0x000000FF);
114 addr[3] = (low_word & 0xFF000000) >> 24;
115 addr[2] = (low_word & 0x00FF0000) >> 16;
116 addr[1] = (low_word & 0x0000FF00) >> 8;
117 addr[0] = (low_word & 0x000000FF);
118}
119
120static void sxgbe_enable_tx(void __iomem *ioaddr, bool enable)
121{
122 u32 tx_config;
123
124 tx_config = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
125 tx_config &= ~SXGBE_TX_ENABLE;
126
127 if (enable)
128 tx_config |= SXGBE_TX_ENABLE;
129 writel(tx_config, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
130}
131
132static void sxgbe_enable_rx(void __iomem *ioaddr, bool enable)
133{
134 u32 rx_config;
135
136 rx_config = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
137 rx_config &= ~SXGBE_RX_ENABLE;
138
139 if (enable)
140 rx_config |= SXGBE_RX_ENABLE;
141 writel(rx_config, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
142}
143
144static int sxgbe_get_controller_version(void __iomem *ioaddr)
145{
146 return readl(ioaddr + SXGBE_CORE_VERSION_REG);
147}
148
149/* If supported then get the optional core features */
150static unsigned int sxgbe_get_hw_feature(void __iomem *ioaddr,
151 unsigned char feature_index)
152{
153 return readl(ioaddr + (SXGBE_CORE_HW_FEA_REG(feature_index)));
154}
155
156static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
157{
158 u32 tx_cfg = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
159
160 /* clear the speed bits */
161 tx_cfg &= ~0x60000000;
162 tx_cfg |= (speed << SXGBE_SPEED_LSHIFT);
163
164 /* set the speed */
165 writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
166}
167
168static void sxgbe_set_eee_mode(void __iomem *ioaddr)
169{
170 u32 ctrl;
171
172 /* Enable the LPI mode for transmit path with Tx automate bit set.
173 * When Tx Automate bit is set, MAC internally handles the entry
174 * to LPI mode after all outstanding and pending packets are
175 * transmitted.
176 */
177 ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
178 ctrl |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA;
179 writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
180}
181
182static void sxgbe_reset_eee_mode(void __iomem *ioaddr)
183{
184 u32 ctrl;
185
186 ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
187 ctrl &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA);
188 writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
189}
190
191static void sxgbe_set_eee_pls(void __iomem *ioaddr, const int link)
192{
193 u32 ctrl;
194
195 ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
196
197 /* If the PHY link status is UP then set PLS */
198 if (link)
199 ctrl |= LPI_CTRL_STATUS_PLS;
200 else
201 ctrl &= ~LPI_CTRL_STATUS_PLS;
202
203 writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
204}
205
206static void sxgbe_set_eee_timer(void __iomem *ioaddr,
207 const int ls, const int tw)
208{
209 int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
210
211 /* Program the timers in the LPI timer control register:
212 * LS: minimum time (ms) for which the link
213 * status from PHY should be ok before transmitting
214 * the LPI pattern.
215 * TW: minimum time (us) for which the core waits
216 * after it has stopped transmitting the LPI pattern.
217 */
218 writel(value, ioaddr + SXGBE_CORE_LPI_TIMER_CTRL);
219}
220
221static void sxgbe_enable_rx_csum(void __iomem *ioaddr)
222{
223 u32 ctrl;
224
225 ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
226 ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
227 writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
228}
229
230static void sxgbe_disable_rx_csum(void __iomem *ioaddr)
231{
232 u32 ctrl;
233
234 ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
235 ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
236 writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
237}
238
239const struct sxgbe_core_ops core_ops = {
240 .core_init = sxgbe_core_init,
241 .dump_regs = sxgbe_core_dump_regs,
242 .host_irq_status = sxgbe_core_host_irq_status,
243 .pmt = sxgbe_core_pmt,
244 .set_umac_addr = sxgbe_core_set_umac_addr,
245 .get_umac_addr = sxgbe_core_get_umac_addr,
246 .enable_rx = sxgbe_enable_rx,
247 .enable_tx = sxgbe_enable_tx,
248 .get_controller_version = sxgbe_get_controller_version,
249 .get_hw_feature = sxgbe_get_hw_feature,
250 .set_speed = sxgbe_core_set_speed,
251 .set_eee_mode = sxgbe_set_eee_mode,
252 .reset_eee_mode = sxgbe_reset_eee_mode,
253 .set_eee_timer = sxgbe_set_eee_timer,
254 .set_eee_pls = sxgbe_set_eee_pls,
255 .enable_rx_csum = sxgbe_enable_rx_csum,
256 .disable_rx_csum = sxgbe_disable_rx_csum,
257};
258
259const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
260{
261 return &core_ops;
262}
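As a worked check of the address packing in sxgbe_core_set_umac_addr() above (note the bitwise ORs), take the MAC 00:11:22:33:44:55 from the binding example, stored as addr[0] = 0x00 ... addr[5] = 0x55:

	/*
	 *   high_word = (0x55 << 8) | 0x44                 = 0x00005544
	 *   low_word  = (0x33 << 24) | (0x22 << 16) |
	 *               (0x11 << 8)  |  0x00               = 0x33221100
	 *
	 * sxgbe_core_get_umac_addr() reverses exactly these shifts,
	 * so a set followed by a get returns the original six bytes.
	 */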
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
new file mode 100644
index 000000000000..e896dbbd2e15
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
@@ -0,0 +1,515 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/bitops.h>
16#include <linux/export.h>
17#include <linux/io.h>
18#include <linux/netdevice.h>
19#include <linux/phy.h>
20
21#include "sxgbe_common.h"
22#include "sxgbe_dma.h"
23#include "sxgbe_desc.h"
24
25/* DMA TX descriptor ring initialization */
26static void sxgbe_init_tx_desc(struct sxgbe_tx_norm_desc *p)
27{
28 p->tdes23.tx_rd_des23.own_bit = 0;
29}
30
31static void sxgbe_tx_desc_enable_tse(struct sxgbe_tx_norm_desc *p, u8 is_tse,
32 u32 total_hdr_len, u32 tcp_hdr_len,
33 u32 tcp_payload_len)
34{
35 p->tdes23.tx_rd_des23.tse_bit = is_tse;
36 p->tdes23.tx_rd_des23.buf1_size = total_hdr_len;
37 p->tdes23.tx_rd_des23.tcp_hdr_len = tcp_hdr_len / 4;
38 p->tdes23.tx_rd_des23.tx_pkt_len.tcp_payload_len = tcp_payload_len;
39}
40
41/* Assign buffer lengths for descriptor */
42static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
43 int buf1_len, int pkt_len, int cksum)
44{
45 p->tdes23.tx_rd_des23.first_desc = is_fd;
46 p->tdes23.tx_rd_des23.buf1_size = buf1_len;
47
48 p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len;
49
50 if (cksum)
51 p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full;
52}
53
54/* Set VLAN control information */
55static void sxgbe_tx_vlanctl_desc(struct sxgbe_tx_norm_desc *p, int vlan_ctl)
56{
57 p->tdes23.tx_rd_des23.vlan_tag_ctl = vlan_ctl;
58}
59
60/* Set the owner of Normal descriptor */
61static void sxgbe_set_tx_owner(struct sxgbe_tx_norm_desc *p)
62{
63 p->tdes23.tx_rd_des23.own_bit = 1;
64}
65
66/* Get the owner of Normal descriptor */
67static int sxgbe_get_tx_owner(struct sxgbe_tx_norm_desc *p)
68{
69 return p->tdes23.tx_rd_des23.own_bit;
70}
71
72/* Invoked by the xmit function to close the tx descriptor */
73static void sxgbe_close_tx_desc(struct sxgbe_tx_norm_desc *p)
74{
75 p->tdes23.tx_rd_des23.last_desc = 1;
76 p->tdes23.tx_rd_des23.int_on_com = 1;
77}
78
79/* Clean the tx descriptor as soon as the tx irq is received */
80static void sxgbe_release_tx_desc(struct sxgbe_tx_norm_desc *p)
81{
82 memset(p, 0, sizeof(*p));
83}
84
85/* Clear interrupt on tx frame completion. When this bit is
86 * set an interrupt happens as soon as the frame is transmitted
87 */
88static void sxgbe_clear_tx_ic(struct sxgbe_tx_norm_desc *p)
89{
90 p->tdes23.tx_rd_des23.int_on_com = 0;
91}
92
93/* Last tx segment reports the transmit status */
94static int sxgbe_get_tx_ls(struct sxgbe_tx_norm_desc *p)
95{
96 return p->tdes23.tx_rd_des23.last_desc;
97}
98
99/* Get the buffer size from the descriptor */
100static int sxgbe_get_tx_len(struct sxgbe_tx_norm_desc *p)
101{
102 return p->tdes23.tx_rd_des23.buf1_size;
103}
104
105/* Set tx timestamp enable bit */
106static void sxgbe_tx_enable_tstamp(struct sxgbe_tx_norm_desc *p)
107{
108 p->tdes23.tx_rd_des23.timestmp_enable = 1;
109}
110
111/* get tx timestamp status */
112static int sxgbe_get_tx_timestamp_status(struct sxgbe_tx_norm_desc *p)
113{
114 return p->tdes23.tx_rd_des23.timestmp_enable;
115}
116
117/* TX Context Descriptor Specific */
118static void sxgbe_tx_ctxt_desc_set_ctxt(struct sxgbe_tx_ctxt_desc *p)
119{
120 p->ctxt_bit = 1;
121}
122
123/* Set the owner of TX context descriptor */
124static void sxgbe_tx_ctxt_desc_set_owner(struct sxgbe_tx_ctxt_desc *p)
125{
126 p->own_bit = 1;
127}
128
129/* Get the owner of TX context descriptor */
130static int sxgbe_tx_ctxt_desc_get_owner(struct sxgbe_tx_ctxt_desc *p)
131{
132 return p->own_bit;
133}
134
135/* Set TX mss in TX context Descriptor */
136static void sxgbe_tx_ctxt_desc_set_mss(struct sxgbe_tx_ctxt_desc *p, u16 mss)
137{
138 p->maxseg_size = mss;
139}
140
141/* Get TX mss from TX context Descriptor */
142static int sxgbe_tx_ctxt_desc_get_mss(struct sxgbe_tx_ctxt_desc *p)
143{
144 return p->maxseg_size;
145}
146
147/* Set TX tcmssv in TX context Descriptor */
148static void sxgbe_tx_ctxt_desc_set_tcmssv(struct sxgbe_tx_ctxt_desc *p)
149{
150 p->tcmssv = 1;
151}
152
153/* Reset TX ostc in TX context Descriptor */
154static void sxgbe_tx_ctxt_desc_reset_ostc(struct sxgbe_tx_ctxt_desc *p)
155{
156 p->ostc = 0;
157}
158
159/* Set IVLAN information */
160static void sxgbe_tx_ctxt_desc_set_ivlantag(struct sxgbe_tx_ctxt_desc *p,
161 int is_ivlanvalid, int ivlan_tag,
162 int ivlan_ctl)
163{
164 if (is_ivlanvalid) {
165 p->ivlan_tag_valid = is_ivlanvalid;
166 p->ivlan_tag = ivlan_tag;
167 p->ivlan_tag_ctl = ivlan_ctl;
168 }
169}
170
171/* Return IVLAN Tag */
172static int sxgbe_tx_ctxt_desc_get_ivlantag(struct sxgbe_tx_ctxt_desc *p)
173{
174 return p->ivlan_tag;
175}
176
177/* Set VLAN Tag */
178static void sxgbe_tx_ctxt_desc_set_vlantag(struct sxgbe_tx_ctxt_desc *p,
179 int is_vlanvalid, int vlan_tag)
180{
181 if (is_vlanvalid) {
182 p->vltag_valid = is_vlanvalid;
183 p->vlan_tag = vlan_tag;
184 }
185}
186
187/* Return VLAN Tag */
188static int sxgbe_tx_ctxt_desc_get_vlantag(struct sxgbe_tx_ctxt_desc *p)
189{
190 return p->vlan_tag;
191}
192
193/* Set Time stamp */
194static void sxgbe_tx_ctxt_desc_set_tstamp(struct sxgbe_tx_ctxt_desc *p,
195 u8 ostc_enable, u64 tstamp)
196{
197 if (ostc_enable) {
198 p->ostc = ostc_enable;
199 p->tstamp_lo = (u32) tstamp;
200 p->tstamp_hi = (u32) (tstamp>>32);
201 }
202}
203/* Close TX context descriptor */
204static void sxgbe_tx_ctxt_desc_close(struct sxgbe_tx_ctxt_desc *p)
205{
206 p->own_bit = 1;
207}
208
209/* WB status of context descriptor */
210static int sxgbe_tx_ctxt_desc_get_cde(struct sxgbe_tx_ctxt_desc *p)
211{
212 return p->ctxt_desc_err;
213}
214
215/* DMA RX descriptor ring initialization */
216static void sxgbe_init_rx_desc(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
217 int mode, int end)
218{
219 p->rdes23.rx_rd_des23.own_bit = 1;
220 if (disable_rx_ic)
221 p->rdes23.rx_rd_des23.int_on_com = disable_rx_ic;
222}
223
224/* Get RX own bit */
225static int sxgbe_get_rx_owner(struct sxgbe_rx_norm_desc *p)
226{
227 return p->rdes23.rx_rd_des23.own_bit;
228}
229
230/* Set RX own bit */
231static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
232{
233 p->rdes23.rx_rd_des23.own_bit = 1;
234}
235
236/* Get the receive frame size */
237static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
238{
239 return p->rdes23.rx_wb_des23.pkt_len;
240}
241
242/* Return first Descriptor status */
243static int sxgbe_get_rx_fd_status(struct sxgbe_rx_norm_desc *p)
244{
245 return p->rdes23.rx_wb_des23.first_desc;
246}
247
248/* Return Last Descriptor status */
249static int sxgbe_get_rx_ld_status(struct sxgbe_rx_norm_desc *p)
250{
251 return p->rdes23.rx_wb_des23.last_desc;
252}
253
254
255/* Return the RX status looking at the WB fields */
256static int sxgbe_rx_wbstatus(struct sxgbe_rx_norm_desc *p,
257 struct sxgbe_extra_stats *x, int *checksum)
258{
259 int status = 0;
260
261 *checksum = CHECKSUM_UNNECESSARY;
262 if (p->rdes23.rx_wb_des23.err_summary) {
263 switch (p->rdes23.rx_wb_des23.err_l2_type) {
264 case RX_GMII_ERR:
265 status = -EINVAL;
266 x->rx_code_gmii_err++;
267 break;
268 case RX_WATCHDOG_ERR:
269 status = -EINVAL;
270 x->rx_watchdog_err++;
271 break;
272 case RX_CRC_ERR:
273 status = -EINVAL;
274 x->rx_crc_err++;
275 break;
276 case RX_GAINT_ERR:
277 status = -EINVAL;
278 x->rx_gaint_pkt_err++;
279 break;
280 case RX_IP_HDR_ERR:
281 *checksum = CHECKSUM_NONE;
282 x->ip_hdr_err++;
283 break;
284 case RX_PAYLOAD_ERR:
285 *checksum = CHECKSUM_NONE;
286 x->ip_payload_err++;
287 break;
288 case RX_OVERFLOW_ERR:
289 status = -EINVAL;
290 x->overflow_error++;
291 break;
292 default:
293 pr_err("Invalid Error type\n");
294 break;
295 }
296 } else {
297 switch (p->rdes23.rx_wb_des23.err_l2_type) {
298 case RX_LEN_PKT:
299 x->len_pkt++;
300 break;
301 case RX_MACCTL_PKT:
302 x->mac_ctl_pkt++;
303 break;
304 case RX_DCBCTL_PKT:
305 x->dcb_ctl_pkt++;
306 break;
307 case RX_ARP_PKT:
308 x->arp_pkt++;
309 break;
310 case RX_OAM_PKT:
311 x->oam_pkt++;
312 break;
313 case RX_UNTAG_PKT:
314 x->untag_okt++;
315 break;
316 case RX_OTHER_PKT:
317 x->other_pkt++;
318 break;
319 case RX_SVLAN_PKT:
320 x->svlan_tag_pkt++;
321 break;
322 case RX_CVLAN_PKT:
323 x->cvlan_tag_pkt++;
324 break;
325 case RX_DVLAN_OCVLAN_ICVLAN_PKT:
326 x->dvlan_ocvlan_icvlan_pkt++;
327 break;
328 case RX_DVLAN_OSVLAN_ISVLAN_PKT:
329 x->dvlan_osvlan_isvlan_pkt++;
330 break;
331 case RX_DVLAN_OSVLAN_ICVLAN_PKT:
332 x->dvlan_osvlan_icvlan_pkt++;
333 break;
334 case RX_DVLAN_OCVLAN_ISVLAN_PKT:
336			x->dvan_ocvlan_icvlan_pkt++;
336 break;
337 default:
338 pr_err("Invalid L2 Packet type\n");
339 break;
340 }
341 }
342
343 /* L3/L4 Pkt type */
344 switch (p->rdes23.rx_wb_des23.layer34_pkt_type) {
345 case RX_NOT_IP_PKT:
346 x->not_ip_pkt++;
347 break;
348 case RX_IPV4_TCP_PKT:
349 x->ip4_tcp_pkt++;
350 break;
351 case RX_IPV4_UDP_PKT:
352 x->ip4_udp_pkt++;
353 break;
354 case RX_IPV4_ICMP_PKT:
355 x->ip4_icmp_pkt++;
356 break;
357 case RX_IPV4_UNKNOWN_PKT:
358 x->ip4_unknown_pkt++;
359 break;
360 case RX_IPV6_TCP_PKT:
361 x->ip6_tcp_pkt++;
362 break;
363 case RX_IPV6_UDP_PKT:
364 x->ip6_udp_pkt++;
365 break;
366 case RX_IPV6_ICMP_PKT:
367 x->ip6_icmp_pkt++;
368 break;
369 case RX_IPV6_UNKNOWN_PKT:
370 x->ip6_unknown_pkt++;
371 break;
372 default:
373 pr_err("Invalid L3/L4 Packet type\n");
374 break;
375 }
376
377 /* Filter */
378 if (p->rdes23.rx_wb_des23.vlan_filter_match)
379 x->vlan_filter_match++;
380
381 if (p->rdes23.rx_wb_des23.sa_filter_fail) {
382 status = -EINVAL;
383 x->sa_filter_fail++;
384 }
385 if (p->rdes23.rx_wb_des23.da_filter_fail) {
386 status = -EINVAL;
387 x->da_filter_fail++;
388 }
389 if (p->rdes23.rx_wb_des23.hash_filter_pass)
390 x->hash_filter_pass++;
391
392 if (p->rdes23.rx_wb_des23.l3_filter_match)
393 x->l3_filter_match++;
394
395 if (p->rdes23.rx_wb_des23.l4_filter_match)
396 x->l4_filter_match++;
397
398 return status;
399}
400
401/* Get own bit of context descriptor */
402static int sxgbe_get_rx_ctxt_owner(struct sxgbe_rx_ctxt_desc *p)
403{
404 return p->own_bit;
405}
406
407/* Set own bit for context descriptor */
408static void sxgbe_set_ctxt_rx_owner(struct sxgbe_rx_ctxt_desc *p)
409{
410 p->own_bit = 1;
411}
412
413
414/* Return the reception status looking at Context control information */
415static void sxgbe_rx_ctxt_wbstatus(struct sxgbe_rx_ctxt_desc *p,
416 struct sxgbe_extra_stats *x)
417{
418 if (p->tstamp_dropped)
419 x->timestamp_dropped++;
420
421 /* ptp */
422 if (p->ptp_msgtype == RX_NO_PTP)
423 x->rx_msg_type_no_ptp++;
424 else if (p->ptp_msgtype == RX_PTP_SYNC)
425 x->rx_ptp_type_sync++;
426 else if (p->ptp_msgtype == RX_PTP_FOLLOW_UP)
427 x->rx_ptp_type_follow_up++;
428 else if (p->ptp_msgtype == RX_PTP_DELAY_REQ)
429 x->rx_ptp_type_delay_req++;
430 else if (p->ptp_msgtype == RX_PTP_DELAY_RESP)
431 x->rx_ptp_type_delay_resp++;
432 else if (p->ptp_msgtype == RX_PTP_PDELAY_REQ)
433 x->rx_ptp_type_pdelay_req++;
434 else if (p->ptp_msgtype == RX_PTP_PDELAY_RESP)
435 x->rx_ptp_type_pdelay_resp++;
436 else if (p->ptp_msgtype == RX_PTP_PDELAY_FOLLOW_UP)
437 x->rx_ptp_type_pdelay_follow_up++;
438 else if (p->ptp_msgtype == RX_PTP_ANNOUNCE)
439 x->rx_ptp_announce++;
440 else if (p->ptp_msgtype == RX_PTP_MGMT)
441 x->rx_ptp_mgmt++;
442 else if (p->ptp_msgtype == RX_PTP_SIGNAL)
443 x->rx_ptp_signal++;
444 else if (p->ptp_msgtype == RX_PTP_RESV_MSG)
445 x->rx_ptp_resv_msg_type++;
446}
447
448/* Get rx timestamp status */
449static int sxgbe_get_rx_ctxt_tstamp_status(struct sxgbe_rx_ctxt_desc *p)
450{
451 if ((p->tstamp_hi == 0xffffffff) && (p->tstamp_lo == 0xffffffff)) {
452 pr_err("Time stamp corrupted\n");
453 return 0;
454 }
455
456 return p->tstamp_available;
457}
458
459
460static u64 sxgbe_get_rx_timestamp(struct sxgbe_rx_ctxt_desc *p)
461{
462 u64 ns;
463
464 ns = p->tstamp_lo;
465 ns |= ((u64)p->tstamp_hi) << 32;
466
467 return ns;
468}
469
470static const struct sxgbe_desc_ops desc_ops = {
471 .init_tx_desc = sxgbe_init_tx_desc,
472 .tx_desc_enable_tse = sxgbe_tx_desc_enable_tse,
473 .prepare_tx_desc = sxgbe_prepare_tx_desc,
474 .tx_vlanctl_desc = sxgbe_tx_vlanctl_desc,
475 .set_tx_owner = sxgbe_set_tx_owner,
476 .get_tx_owner = sxgbe_get_tx_owner,
477 .close_tx_desc = sxgbe_close_tx_desc,
478 .release_tx_desc = sxgbe_release_tx_desc,
479 .clear_tx_ic = sxgbe_clear_tx_ic,
480 .get_tx_ls = sxgbe_get_tx_ls,
481 .get_tx_len = sxgbe_get_tx_len,
482 .tx_enable_tstamp = sxgbe_tx_enable_tstamp,
483 .get_tx_timestamp_status = sxgbe_get_tx_timestamp_status,
484 .tx_ctxt_desc_set_ctxt = sxgbe_tx_ctxt_desc_set_ctxt,
485 .tx_ctxt_desc_set_owner = sxgbe_tx_ctxt_desc_set_owner,
486 .get_tx_ctxt_owner = sxgbe_tx_ctxt_desc_get_owner,
487 .tx_ctxt_desc_set_mss = sxgbe_tx_ctxt_desc_set_mss,
488 .tx_ctxt_desc_get_mss = sxgbe_tx_ctxt_desc_get_mss,
489 .tx_ctxt_desc_set_tcmssv = sxgbe_tx_ctxt_desc_set_tcmssv,
490 .tx_ctxt_desc_reset_ostc = sxgbe_tx_ctxt_desc_reset_ostc,
491 .tx_ctxt_desc_set_ivlantag = sxgbe_tx_ctxt_desc_set_ivlantag,
492 .tx_ctxt_desc_get_ivlantag = sxgbe_tx_ctxt_desc_get_ivlantag,
493 .tx_ctxt_desc_set_vlantag = sxgbe_tx_ctxt_desc_set_vlantag,
494 .tx_ctxt_desc_get_vlantag = sxgbe_tx_ctxt_desc_get_vlantag,
495 .tx_ctxt_set_tstamp = sxgbe_tx_ctxt_desc_set_tstamp,
496 .close_tx_ctxt_desc = sxgbe_tx_ctxt_desc_close,
497 .get_tx_ctxt_cde = sxgbe_tx_ctxt_desc_get_cde,
498 .init_rx_desc = sxgbe_init_rx_desc,
499 .get_rx_owner = sxgbe_get_rx_owner,
500 .set_rx_owner = sxgbe_set_rx_owner,
501 .get_rx_frame_len = sxgbe_get_rx_frame_len,
502 .get_rx_fd_status = sxgbe_get_rx_fd_status,
503 .get_rx_ld_status = sxgbe_get_rx_ld_status,
504 .rx_wbstatus = sxgbe_rx_wbstatus,
505 .get_rx_ctxt_owner = sxgbe_get_rx_ctxt_owner,
506 .set_rx_ctxt_owner = sxgbe_set_ctxt_rx_owner,
507 .rx_ctxt_wbstatus = sxgbe_rx_ctxt_wbstatus,
508 .get_rx_ctxt_tstamp_status = sxgbe_get_rx_ctxt_tstamp_status,
509 .get_timestamp = sxgbe_get_rx_timestamp,
510};
511
512const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void)
513{
514 return &desc_ops;
515}
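To make the intended calling order of the TX helpers concrete, here is a hedged sketch of transmitting a single-buffer frame through the ops table above; the real xmit path in sxgbe_main.c additionally handles DMA mapping, ring indices and interrupt coalescing, all omitted here:

	/* Illustrative TX flow only */
	static void example_tx_one_buf(const struct sxgbe_desc_ops *ops,
				       struct sxgbe_tx_norm_desc *desc,
				       int len, int do_csum)
	{
		/* first (and only) descriptor of the frame */
		ops->prepare_tx_desc(desc, 1, len, len, do_csum);

		/* last descriptor: mark it and request a completion irq */
		ops->close_tx_desc(desc);

		/* hand ownership to the DMA engine last */
		ops->set_tx_owner(desc);
	}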
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
new file mode 100644
index 000000000000..838cb9fb0ea9
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
@@ -0,0 +1,298 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_DESC_H__
13#define __SXGBE_DESC_H__
14
15#define SXGBE_DESC_SIZE_BYTES 16
16
17/* forward declaration */
18struct sxgbe_extra_stats;
19
20/* Transmit checksum insertion control */
21enum tdes_csum_insertion {
22 cic_disabled = 0, /* Checksum Insertion Control */
23 cic_only_ip = 1, /* Only IP header */
24 /* IP header but pseudoheader is not calculated */
25 cic_no_pseudoheader = 2,
26 cic_full = 3, /* IP header and pseudoheader */
27};
28
29struct sxgbe_tx_norm_desc {
30 u64 tdes01; /* buf1 address */
31 union {
32 /* TX Read-Format Desc 2,3 */
33 struct {
34 /* TDES2 */
35 u32 buf1_size:14;
36 u32 vlan_tag_ctl:2;
37 u32 buf2_size:14;
38 u32 timestmp_enable:1;
39 u32 int_on_com:1;
40 /* TDES3 */
41 union {
42 u32 tcp_payload_len:18;
43 struct {
44 u32 total_pkt_len:15;
45 u32 reserved1:1;
46 u32 cksum_ctl:2;
47 } cksum_pktlen;
48 } tx_pkt_len;
49
50 u32 tse_bit:1;
51 u32 tcp_hdr_len:4;
52 u32 sa_insert_ctl:3;
53 u32 crc_pad_ctl:2;
54 u32 last_desc:1;
55 u32 first_desc:1;
56 u32 ctxt_bit:1;
57 u32 own_bit:1;
58 } tx_rd_des23;
59
60 /* tx write back Desc 2,3 */
61 struct {
62 /* WB TES2 */
63 u32 reserved1;
64 /* WB TES3 */
65 u32 reserved2:31;
66 u32 own_bit:1;
67 } tx_wb_des23;
68 } tdes23;
69};
70
71struct sxgbe_rx_norm_desc {
72 union {
73 u32 rdes0; /* buf1 address */
74 struct {
75 u32 out_vlan_tag:16;
76 u32 in_vlan_tag:16;
77 } wb_rx_des0;
78 } rd_wb_des0;
79
80 union {
81 u32 rdes1; /* buf2 address or buf1[63:32] */
82 u32 rss_hash; /* Write-back RX */
83 } rd_wb_des1;
84
85 union {
86 /* RX Read format Desc 2,3 */
87 struct{
88 /* RDES2 */
89 u32 buf2_addr;
90 /* RDES3 */
91 u32 buf2_hi_addr:30;
92 u32 int_on_com:1;
93 u32 own_bit:1;
94 } rx_rd_des23;
95
96 /* RX write back */
97 struct{
98 /* WB RDES2 */
99 u32 hdr_len:10;
100 u32 rdes2_reserved:2;
101 u32 elrd_val:1;
102 u32 iovt_sel:1;
103 u32 res_pkt:1;
104 u32 vlan_filter_match:1;
105 u32 sa_filter_fail:1;
106 u32 da_filter_fail:1;
107 u32 hash_filter_pass:1;
108 u32 macaddr_filter_match:8;
109 u32 l3_filter_match:1;
110 u32 l4_filter_match:1;
111 u32 l34_filter_num:3;
112
113 /* WB RDES3 */
114 u32 pkt_len:14;
115 u32 rdes3_reserved:1;
116 u32 err_summary:1;
117 u32 err_l2_type:4;
118 u32 layer34_pkt_type:4;
119 u32 no_coagulation_pkt:1;
120 u32 in_seq_pkt:1;
121 u32 rss_valid:1;
122 u32 context_des_avail:1;
123 u32 last_desc:1;
124 u32 first_desc:1;
125 u32 recv_context_desc:1;
126 u32 own_bit:1;
127 } rx_wb_des23;
128 } rdes23;
129};
130
131/* Context descriptor structure */
132struct sxgbe_tx_ctxt_desc {
133 u32 tstamp_lo;
134 u32 tstamp_hi;
135 u32 maxseg_size:15;
136 u32 reserved1:1;
137 u32 ivlan_tag:16;
138 u32 vlan_tag:16;
139 u32 vltag_valid:1;
140 u32 ivlan_tag_valid:1;
141 u32 ivlan_tag_ctl:2;
142 u32 reserved2:3;
143 u32 ctxt_desc_err:1;
144 u32 reserved3:2;
145 u32 ostc:1;
146 u32 tcmssv:1;
147 u32 reserved4:2;
148 u32 ctxt_bit:1;
149 u32 own_bit:1;
150};
151
152struct sxgbe_rx_ctxt_desc {
153 u32 tstamp_lo;
154 u32 tstamp_hi;
155 u32 reserved1;
156 u32 ptp_msgtype:4;
157 u32 tstamp_available:1;
158 u32 ptp_rsp_err:1;
159 u32 tstamp_dropped:1;
160 u32 reserved2:23;
161 u32 rx_ctxt_desc:1;
162 u32 own_bit:1;
163};
164
165struct sxgbe_desc_ops {
166 /* DMA TX descriptor ring initialization */
167 void (*init_tx_desc)(struct sxgbe_tx_norm_desc *p);
168
169 /* Invoked by the xmit function to prepare the tx descriptor */
170 void (*tx_desc_enable_tse)(struct sxgbe_tx_norm_desc *p, u8 is_tse,
171 u32 total_hdr_len, u32 tcp_hdr_len,
172 u32 tcp_payload_len);
173
174 /* Assign buffer lengths for descriptor */
175 void (*prepare_tx_desc)(struct sxgbe_tx_norm_desc *p, u8 is_fd,
176 int buf1_len, int pkt_len, int cksum);
177
178 /* Set VLAN control information */
179 void (*tx_vlanctl_desc)(struct sxgbe_tx_norm_desc *p, int vlan_ctl);
180
181 /* Set the owner of the descriptor */
182 void (*set_tx_owner)(struct sxgbe_tx_norm_desc *p);
183
184 /* Get the owner of the descriptor */
185 int (*get_tx_owner)(struct sxgbe_tx_norm_desc *p);
186
187 /* Invoked by the xmit function to close the tx descriptor */
188 void (*close_tx_desc)(struct sxgbe_tx_norm_desc *p);
189
190 /* Clean the tx descriptor as soon as the tx irq is received */
191 void (*release_tx_desc)(struct sxgbe_tx_norm_desc *p);
192
193 /* Clear interrupt on tx frame completion. When this bit is
194 * set an interrupt happens as soon as the frame is transmitted
195 */
196 void (*clear_tx_ic)(struct sxgbe_tx_norm_desc *p);
197
198 /* Last tx segment reports the transmit status */
199 int (*get_tx_ls)(struct sxgbe_tx_norm_desc *p);
200
201 /* Get the buffer size from the descriptor */
202 int (*get_tx_len)(struct sxgbe_tx_norm_desc *p);
203
204 /* Set tx timestamp enable bit */
205 void (*tx_enable_tstamp)(struct sxgbe_tx_norm_desc *p);
206
207 /* get tx timestamp status */
208 int (*get_tx_timestamp_status)(struct sxgbe_tx_norm_desc *p);
209
210	/* TX Context Descriptor Specific */
211 void (*tx_ctxt_desc_set_ctxt)(struct sxgbe_tx_ctxt_desc *p);
212
213 /* Set the owner of the TX context descriptor */
214 void (*tx_ctxt_desc_set_owner)(struct sxgbe_tx_ctxt_desc *p);
215
216 /* Get the owner of the TX context descriptor */
217 int (*get_tx_ctxt_owner)(struct sxgbe_tx_ctxt_desc *p);
218
219 /* Set TX mss */
220 void (*tx_ctxt_desc_set_mss)(struct sxgbe_tx_ctxt_desc *p, u16 mss);
221
222	/* Get TX mss */
223 int (*tx_ctxt_desc_get_mss)(struct sxgbe_tx_ctxt_desc *p);
224
225 /* Set TX tcmssv */
226 void (*tx_ctxt_desc_set_tcmssv)(struct sxgbe_tx_ctxt_desc *p);
227
228 /* Reset TX ostc */
229 void (*tx_ctxt_desc_reset_ostc)(struct sxgbe_tx_ctxt_desc *p);
230
231 /* Set IVLAN information */
232 void (*tx_ctxt_desc_set_ivlantag)(struct sxgbe_tx_ctxt_desc *p,
233 int is_ivlanvalid, int ivlan_tag,
234 int ivlan_ctl);
235
236 /* Return IVLAN Tag */
237 int (*tx_ctxt_desc_get_ivlantag)(struct sxgbe_tx_ctxt_desc *p);
238
239 /* Set VLAN Tag */
240 void (*tx_ctxt_desc_set_vlantag)(struct sxgbe_tx_ctxt_desc *p,
241 int is_vlanvalid, int vlan_tag);
242
243 /* Return VLAN Tag */
244 int (*tx_ctxt_desc_get_vlantag)(struct sxgbe_tx_ctxt_desc *p);
245
246 /* Set Time stamp */
247 void (*tx_ctxt_set_tstamp)(struct sxgbe_tx_ctxt_desc *p,
248 u8 ostc_enable, u64 tstamp);
249
250 /* Close TX context descriptor */
251 void (*close_tx_ctxt_desc)(struct sxgbe_tx_ctxt_desc *p);
252
253 /* WB status of context descriptor */
254 int (*get_tx_ctxt_cde)(struct sxgbe_tx_ctxt_desc *p);
255
256 /* DMA RX descriptor ring initialization */
257 void (*init_rx_desc)(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
258 int mode, int end);
259
260 /* Get own bit */
261 int (*get_rx_owner)(struct sxgbe_rx_norm_desc *p);
262
263 /* Set own bit */
264 void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
265
266 /* Get the receive frame size */
267 int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
268
269 /* Return first Descriptor status */
270 int (*get_rx_fd_status)(struct sxgbe_rx_norm_desc *p);
271
272	/* Return last Descriptor status */
273 int (*get_rx_ld_status)(struct sxgbe_rx_norm_desc *p);
274
275 /* Return the reception status looking at the RDES1 */
276 int (*rx_wbstatus)(struct sxgbe_rx_norm_desc *p,
277 struct sxgbe_extra_stats *x, int *checksum);
278
279 /* Get own bit */
280 int (*get_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
281
282 /* Set own bit */
283 void (*set_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
284
285 /* Return the reception status looking at Context control information */
286 void (*rx_ctxt_wbstatus)(struct sxgbe_rx_ctxt_desc *p,
287 struct sxgbe_extra_stats *x);
288
289 /* Get rx timestamp status */
290 int (*get_rx_ctxt_tstamp_status)(struct sxgbe_rx_ctxt_desc *p);
291
292 /* Get timestamp value for rx, need to check this */
293 u64 (*get_timestamp)(struct sxgbe_rx_ctxt_desc *p);
294};
295
296const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void);
297
298#endif /* __SXGBE_DESC_H__ */
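
The ops table above is the only public surface of sxgbe_desc.c; everything is reached through sxgbe_get_desc_ops(). As a hedged sketch (not the driver's actual xmit code; first_free_desc() and the lengths below are hypothetical placeholders), a transmit path would drive it roughly like this:

	const struct sxgbe_desc_ops *dops = sxgbe_get_desc_ops();
	struct sxgbe_tx_norm_desc *txd = first_free_desc();	/* hypothetical helper */
	int buf_len = 1500, pkt_len = 1500;			/* illustrative lengths */

	dops->init_tx_desc(txd);
	dops->prepare_tx_desc(txd, 1, buf_len, pkt_len, 1);	/* first desc, csum on */
	dops->clear_tx_ic(txd);		/* suppress per-frame IRQ when coalescing */
	dops->close_tx_desc(txd);	/* mark last segment */
	dops->set_tx_owner(txd);	/* hand ownership to the DMA last */
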
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
new file mode 100644
index 000000000000..28f89c41d0cd
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
@@ -0,0 +1,382 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/io.h>
13#include <linux/delay.h>
14#include <linux/export.h>
16#include <linux/netdevice.h>
17#include <linux/phy.h>
18
19#include "sxgbe_common.h"
20#include "sxgbe_dma.h"
21#include "sxgbe_reg.h"
22#include "sxgbe_desc.h"
23
24/* DMA core initialization */
25static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
26{
27 int retry_count = 10;
28 u32 reg_val;
29
30 /* reset the DMA */
31 writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
32 while (retry_count--) {
33 if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
34 SXGBE_DMA_SOFT_RESET))
35 break;
36 mdelay(10);
37 }
38
39 if (retry_count < 0)
40 return -EBUSY;
41
42 reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
43
44 /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
45 * if fix_burst = 1, Set UNDEF = 0 of DMA_Sys_Mode Register.
46 * burst_map is bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
47 * Set burst_map irrespective of fix_burst value.
48 */
49 if (!fix_burst)
50 reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;
51
52 /* write burst len map */
53 reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);
54
55 writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
56
57 return 0;
58}
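
A concrete reading of the burst-map comment above, with SXGBE_DMA_BLENMAP_LSHIFT = 1 (from sxgbe_dma.h); the map value is illustrative:

	/* burst_map is a 7-bit mask over BLEN{4,8,16,32,64,128,256}.
	 * burst_map = 0x7 requests BLEN4 | BLEN8 | BLEN16, so the write
	 * becomes: reg_val |= 0x7 << SXGBE_DMA_BLENMAP_LSHIFT;	(= 0x0e)
	 */
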
59
60static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
61 int fix_burst, int pbl, dma_addr_t dma_tx,
62 dma_addr_t dma_rx, int t_rsize, int r_rsize)
63{
64 u32 reg_val;
65 dma_addr_t dma_addr;
66
67 reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
68 /* set the pbl */
69 if (fix_burst) {
70 reg_val |= SXGBE_DMA_PBL_X8MODE;
71 writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
72 /* program the TX pbl */
73 reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
74 reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
75 writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
76 /* program the RX pbl */
77 reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
78 reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
79 writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
80 }
81
82 /* program desc registers */
83 writel(upper_32_bits(dma_tx),
84 ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
85 writel(lower_32_bits(dma_tx),
86 ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));
87
88 writel(upper_32_bits(dma_rx),
89 ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
90 writel(lower_32_bits(dma_rx),
91 ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));
92
93 /* program tail pointers */
94 /* assumption: upper 32 bits are constant and
95 * same as TX/RX desc list
96 */
97 dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
98 writel(lower_32_bits(dma_addr),
99 ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));
100
101 dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
102 writel(lower_32_bits(dma_addr),
103	       ioaddr + SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num));
104 /* program the ring sizes */
105 writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
106 writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));
107
108 /* Enable TX/RX interrupts */
109 writel(SXGBE_DMA_ENA_INT,
110 ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
111}
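
A worked example of the tail-pointer arithmetic above, assuming SXGBE_DESC_SIZE_BYTES is 16 (one normal descriptor; the base address is illustrative):

	/* dma_tx = 0x80000000, t_rsize = 512:
	 * tail = 0x80000000 + (512 - 1) * 16 = 0x80001ff0
	 * Only the low 32 bits are written, per the stated assumption that
	 * the upper 32 bits match the descriptor list base address.
	 */
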
112
113static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
114{
115 u32 tx_config;
116
117 tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
118 tx_config |= SXGBE_TX_START_DMA;
119 writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
120}
121
122static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
123{
124 /* Enable TX/RX interrupts */
125 writel(SXGBE_DMA_ENA_INT,
126 ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
127}
128
129static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
130{
131 /* Disable TX/RX interrupts */
132 writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
133}
134
135static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
136{
137 int cnum;
138 u32 tx_ctl_reg;
139
140 for (cnum = 0; cnum < tchannels; cnum++) {
141 tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
142 tx_ctl_reg |= SXGBE_TX_ENABLE;
143 writel(tx_ctl_reg,
144 ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
145 }
146}
147
148static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
149{
150 u32 tx_ctl_reg;
151
152 tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
153 tx_ctl_reg |= SXGBE_TX_ENABLE;
154 writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
155}
156
157static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
158{
159 u32 tx_ctl_reg;
160
161 tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
162 tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
163 writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
164}
165
166static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
167{
168 int cnum;
169 u32 tx_ctl_reg;
170
171 for (cnum = 0; cnum < tchannels; cnum++) {
172 tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
173 tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
174 writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
175 }
176}
177
178static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
179{
180 int cnum;
181 u32 rx_ctl_reg;
182
183 for (cnum = 0; cnum < rchannels; cnum++) {
184 rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
185 rx_ctl_reg |= SXGBE_RX_ENABLE;
186 writel(rx_ctl_reg,
187 ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
188 }
189}
190
191static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
192{
193 int cnum;
194 u32 rx_ctl_reg;
195
196 for (cnum = 0; cnum < rchannels; cnum++) {
197 rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
198 rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
199 writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
200 }
201}
202
203static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
204 struct sxgbe_extra_stats *x)
205{
206 u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
207 u32 clear_val = 0;
208 u32 ret_val = 0;
209
210 /* TX Normal Interrupt Summary */
211 if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
212 x->normal_irq_n++;
213 if (int_status & SXGBE_DMA_INT_STATUS_TI) {
214 ret_val |= handle_tx;
215 x->tx_normal_irq_n++;
216 clear_val |= SXGBE_DMA_INT_STATUS_TI;
217 }
218
219 if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
220 x->tx_underflow_irq++;
221 ret_val |= tx_bump_tc;
222 clear_val |= SXGBE_DMA_INT_STATUS_TBU;
223 }
224 } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
225 /* TX Abnormal Interrupt Summary */
226 if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
227 ret_val |= tx_hard_error;
228 clear_val |= SXGBE_DMA_INT_STATUS_TPS;
229 x->tx_process_stopped_irq++;
230 }
231
232 if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
233 ret_val |= tx_hard_error;
234 x->fatal_bus_error_irq++;
235
236			/* Assumption: the FBE bit is the combination of
237			 * all the bus access errors and is cleared when
238			 * the respective error bits are cleared
239 */
240
241 /* check for actual cause */
242 if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
243 x->tx_read_transfer_err++;
244 clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
245 } else {
246 x->tx_write_transfer_err++;
247 }
248
249 if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
250 x->tx_desc_access_err++;
251 clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
252 } else {
253 x->tx_buffer_access_err++;
254 }
255
256 if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
257 x->tx_data_transfer_err++;
258 clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
259 }
260 }
261
262 /* context descriptor error */
263 if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
264 x->tx_ctxt_desc_err++;
265 clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
266 }
267 }
268
269 /* clear the served bits */
270 writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
271
272 return ret_val;
273}
274
275static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
276 struct sxgbe_extra_stats *x)
277{
278 u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
279 u32 clear_val = 0;
280 u32 ret_val = 0;
281
282 /* RX Normal Interrupt Summary */
283 if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
284 x->normal_irq_n++;
285 if (int_status & SXGBE_DMA_INT_STATUS_RI) {
286 ret_val |= handle_rx;
287 x->rx_normal_irq_n++;
288 clear_val |= SXGBE_DMA_INT_STATUS_RI;
289 }
290 } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
291 /* RX Abnormal Interrupt Summary */
292 if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
293 ret_val |= rx_bump_tc;
294 clear_val |= SXGBE_DMA_INT_STATUS_RBU;
295 x->rx_underflow_irq++;
296 }
297
298 if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
299 ret_val |= rx_hard_error;
300 clear_val |= SXGBE_DMA_INT_STATUS_RPS;
301 x->rx_process_stopped_irq++;
302 }
303
304 if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
305 ret_val |= rx_hard_error;
306 x->fatal_bus_error_irq++;
307
308			/* Assumption: the FBE bit is the combination of
309			 * all the bus access errors and is cleared when
310			 * the respective error bits are cleared
311 */
312
313 /* check for actual cause */
314 if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
315 x->rx_read_transfer_err++;
316 clear_val |= SXGBE_DMA_INT_STATUS_REB0;
317 } else {
318 x->rx_write_transfer_err++;
319 }
320
321 if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
322 x->rx_desc_access_err++;
323 clear_val |= SXGBE_DMA_INT_STATUS_REB1;
324 } else {
325 x->rx_buffer_access_err++;
326 }
327
328 if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
329 x->rx_data_transfer_err++;
330 clear_val |= SXGBE_DMA_INT_STATUS_REB2;
331 }
332 }
333 }
334
335 /* clear the served bits */
336 writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
337
338 return ret_val;
339}
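
To illustrate the FBE decode in the two handlers above (a sketch, using the bit names from this file):

	/* int_status = AIS | FBE | REB0:
	 *	rx_read_transfer_err++; REB0 is written back to clear it.
	 * int_status = AIS | FBE, REB0 clear:
	 *	the else branch counts rx_write_transfer_err; no clear bit is
	 *	added, per the stated assumption that FBE clears with its cause.
	 */
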
340
341/* Program the HW RX Watchdog */
342static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
343{
344 u32 que_num;
345
346 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
347 writel(riwt,
348 ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
349 }
350}
351
352static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num)
353{
354 u32 ctrl;
355
356 ctrl = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
357 ctrl |= SXGBE_DMA_CHA_TXCTL_TSE_ENABLE;
358 writel(ctrl, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
359}
360
361static const struct sxgbe_dma_ops sxgbe_dma_ops = {
362 .init = sxgbe_dma_init,
363 .cha_init = sxgbe_dma_channel_init,
364 .enable_dma_transmission = sxgbe_enable_dma_transmission,
365 .enable_dma_irq = sxgbe_enable_dma_irq,
366 .disable_dma_irq = sxgbe_disable_dma_irq,
367 .start_tx = sxgbe_dma_start_tx,
368 .start_tx_queue = sxgbe_dma_start_tx_queue,
369 .stop_tx = sxgbe_dma_stop_tx,
370 .stop_tx_queue = sxgbe_dma_stop_tx_queue,
371 .start_rx = sxgbe_dma_start_rx,
372 .stop_rx = sxgbe_dma_stop_rx,
373 .tx_dma_int_status = sxgbe_tx_dma_int_status,
374 .rx_dma_int_status = sxgbe_rx_dma_int_status,
375 .rx_watchdog = sxgbe_dma_rx_watchdog,
376 .enable_tso = sxgbe_enable_tso,
377};
378
379const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
380{
381 return &sxgbe_dma_ops;
382}
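
For orientation, a hedged sketch of how the main driver could bring up one channel through this ops table (ioaddr, tx_phy, rx_phy and the ring sizes below are placeholders, not the actual probe/open code):

	const struct sxgbe_dma_ops *dma = sxgbe_get_dma_ops();
	int ret;

	ret = dma->init(ioaddr, 1 /* fix_burst */, 0x7 /* burst_map */);
	if (ret)
		return ret;	/* DMA soft reset did not complete */

	dma->cha_init(ioaddr, 0 /* channel */, 1, DEFAULT_DMA_PBL,
		      tx_phy, rx_phy, tx_ring_size, rx_ring_size);
	dma->start_tx_queue(ioaddr, 0);
	dma->start_rx(ioaddr, 1 /* number of RX channels */);
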
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h
new file mode 100644
index 000000000000..1607b54c9bb0
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h
@@ -0,0 +1,50 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_DMA_H__
13#define __SXGBE_DMA_H__
14
15/* forward declaration */
16struct sxgbe_extra_stats;
17
18#define SXGBE_DMA_BLENMAP_LSHIFT 1
19#define SXGBE_DMA_TXPBL_LSHIFT 16
20#define SXGBE_DMA_RXPBL_LSHIFT 16
21#define DEFAULT_DMA_PBL 8
22
23struct sxgbe_dma_ops {
24 /* DMA core initialization */
25 int (*init)(void __iomem *ioaddr, int fix_burst, int burst_map);
26 void (*cha_init)(void __iomem *ioaddr, int cha_num, int fix_burst,
27 int pbl, dma_addr_t dma_tx, dma_addr_t dma_rx,
28			 int t_rsize, int r_rsize);
29 void (*enable_dma_transmission)(void __iomem *ioaddr, int dma_cnum);
30 void (*enable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
31 void (*disable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
32 void (*start_tx)(void __iomem *ioaddr, int tchannels);
33 void (*start_tx_queue)(void __iomem *ioaddr, int dma_cnum);
34 void (*stop_tx)(void __iomem *ioaddr, int tchannels);
35 void (*stop_tx_queue)(void __iomem *ioaddr, int dma_cnum);
36 void (*start_rx)(void __iomem *ioaddr, int rchannels);
37 void (*stop_rx)(void __iomem *ioaddr, int rchannels);
38 int (*tx_dma_int_status)(void __iomem *ioaddr, int channel_no,
39 struct sxgbe_extra_stats *x);
40 int (*rx_dma_int_status)(void __iomem *ioaddr, int channel_no,
41 struct sxgbe_extra_stats *x);
42 /* Program the HW RX Watchdog */
43 void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt);
44 /* Enable TSO for each DMA channel */
45 void (*enable_tso)(void __iomem *ioaddr, u8 chan_num);
46};
47
48const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void);
49
50#endif /* __SXGBE_DMA_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
new file mode 100644
index 000000000000..0415fa50eeb7
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -0,0 +1,524 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/clk.h>
16#include <linux/interrupt.h>
17#include <linux/kernel.h>
18#include <linux/netdevice.h>
19#include <linux/net_tstamp.h>
20#include <linux/phy.h>
21#include <linux/ptp_clock_kernel.h>
22
23#include "sxgbe_common.h"
24#include "sxgbe_reg.h"
25#include "sxgbe_dma.h"
26
27struct sxgbe_stats {
28 char stat_string[ETH_GSTRING_LEN];
29 int sizeof_stat;
30 int stat_offset;
31};
32
33#define SXGBE_STAT(m) \
34{ \
35 #m, \
36 FIELD_SIZEOF(struct sxgbe_extra_stats, m), \
37 offsetof(struct sxgbe_priv_data, xstats.m) \
38}
39
40static const struct sxgbe_stats sxgbe_gstrings_stats[] = {
41 /* TX/RX IRQ events */
42 SXGBE_STAT(tx_process_stopped_irq),
43 SXGBE_STAT(tx_ctxt_desc_err),
44 SXGBE_STAT(tx_threshold),
45 SXGBE_STAT(rx_threshold),
46 SXGBE_STAT(tx_pkt_n),
47 SXGBE_STAT(rx_pkt_n),
48 SXGBE_STAT(normal_irq_n),
49 SXGBE_STAT(tx_normal_irq_n),
50 SXGBE_STAT(rx_normal_irq_n),
51 SXGBE_STAT(napi_poll),
52 SXGBE_STAT(tx_clean),
53 SXGBE_STAT(tx_reset_ic_bit),
54 SXGBE_STAT(rx_process_stopped_irq),
55 SXGBE_STAT(rx_underflow_irq),
56
57 /* Bus access errors */
58 SXGBE_STAT(fatal_bus_error_irq),
59 SXGBE_STAT(tx_read_transfer_err),
60 SXGBE_STAT(tx_write_transfer_err),
61 SXGBE_STAT(tx_desc_access_err),
62 SXGBE_STAT(tx_buffer_access_err),
63 SXGBE_STAT(tx_data_transfer_err),
64 SXGBE_STAT(rx_read_transfer_err),
65 SXGBE_STAT(rx_write_transfer_err),
66 SXGBE_STAT(rx_desc_access_err),
67 SXGBE_STAT(rx_buffer_access_err),
68 SXGBE_STAT(rx_data_transfer_err),
69
70 /* EEE-LPI stats */
71 SXGBE_STAT(tx_lpi_entry_n),
72 SXGBE_STAT(tx_lpi_exit_n),
73 SXGBE_STAT(rx_lpi_entry_n),
74 SXGBE_STAT(rx_lpi_exit_n),
75 SXGBE_STAT(eee_wakeup_error_n),
76
77 /* RX specific */
78 /* L2 error */
79 SXGBE_STAT(rx_code_gmii_err),
80 SXGBE_STAT(rx_watchdog_err),
81 SXGBE_STAT(rx_crc_err),
82 SXGBE_STAT(rx_gaint_pkt_err),
83 SXGBE_STAT(ip_hdr_err),
84 SXGBE_STAT(ip_payload_err),
85 SXGBE_STAT(overflow_error),
86
87 /* L2 Pkt type */
88 SXGBE_STAT(len_pkt),
89 SXGBE_STAT(mac_ctl_pkt),
90 SXGBE_STAT(dcb_ctl_pkt),
91 SXGBE_STAT(arp_pkt),
92 SXGBE_STAT(oam_pkt),
93 SXGBE_STAT(untag_okt),
94 SXGBE_STAT(other_pkt),
95 SXGBE_STAT(svlan_tag_pkt),
96 SXGBE_STAT(cvlan_tag_pkt),
97 SXGBE_STAT(dvlan_ocvlan_icvlan_pkt),
98 SXGBE_STAT(dvlan_osvlan_isvlan_pkt),
99 SXGBE_STAT(dvlan_osvlan_icvlan_pkt),
100 SXGBE_STAT(dvan_ocvlan_icvlan_pkt),
101
102 /* L3/L4 Pkt type */
103 SXGBE_STAT(not_ip_pkt),
104 SXGBE_STAT(ip4_tcp_pkt),
105 SXGBE_STAT(ip4_udp_pkt),
106 SXGBE_STAT(ip4_icmp_pkt),
107 SXGBE_STAT(ip4_unknown_pkt),
108 SXGBE_STAT(ip6_tcp_pkt),
109 SXGBE_STAT(ip6_udp_pkt),
110 SXGBE_STAT(ip6_icmp_pkt),
111 SXGBE_STAT(ip6_unknown_pkt),
112
113 /* Filter specific */
114 SXGBE_STAT(vlan_filter_match),
115 SXGBE_STAT(sa_filter_fail),
116 SXGBE_STAT(da_filter_fail),
117 SXGBE_STAT(hash_filter_pass),
118 SXGBE_STAT(l3_filter_match),
119 SXGBE_STAT(l4_filter_match),
120
121 /* RX context specific */
122 SXGBE_STAT(timestamp_dropped),
123 SXGBE_STAT(rx_msg_type_no_ptp),
124 SXGBE_STAT(rx_ptp_type_sync),
125 SXGBE_STAT(rx_ptp_type_follow_up),
126 SXGBE_STAT(rx_ptp_type_delay_req),
127 SXGBE_STAT(rx_ptp_type_delay_resp),
128 SXGBE_STAT(rx_ptp_type_pdelay_req),
129 SXGBE_STAT(rx_ptp_type_pdelay_resp),
130 SXGBE_STAT(rx_ptp_type_pdelay_follow_up),
131 SXGBE_STAT(rx_ptp_announce),
132 SXGBE_STAT(rx_ptp_mgmt),
133 SXGBE_STAT(rx_ptp_signal),
134 SXGBE_STAT(rx_ptp_resv_msg_type),
135};
136#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats)
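
For concreteness, one table entry above expands as follows (a sketch; the counters live in struct sxgbe_extra_stats, assumed here to be unsigned long, i.e. 8 bytes on 64-bit):

	/* SXGBE_STAT(tx_clean) ==
	 *	{ "tx_clean",
	 *	  FIELD_SIZEOF(struct sxgbe_extra_stats, tx_clean),
	 *	  offsetof(struct sxgbe_priv_data, xstats.tx_clean) }
	 * sxgbe_get_ethtool_stats() adds the offset to the priv pointer and
	 * reads the counter as u64 or u32 depending on the recorded size.
	 */
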
137
138static int sxgbe_get_eee(struct net_device *dev,
139 struct ethtool_eee *edata)
140{
141 struct sxgbe_priv_data *priv = netdev_priv(dev);
142
143 if (!priv->hw_cap.eee)
144 return -EOPNOTSUPP;
145
146 edata->eee_enabled = priv->eee_enabled;
147 edata->eee_active = priv->eee_active;
148 edata->tx_lpi_timer = priv->tx_lpi_timer;
149
150 return phy_ethtool_get_eee(priv->phydev, edata);
151}
152
153static int sxgbe_set_eee(struct net_device *dev,
154 struct ethtool_eee *edata)
155{
156 struct sxgbe_priv_data *priv = netdev_priv(dev);
157
158 priv->eee_enabled = edata->eee_enabled;
159
160 if (!priv->eee_enabled) {
161 sxgbe_disable_eee_mode(priv);
162 } else {
163		/* We are asking to enable EEE, but it is safe to verify
164		 * everything by invoking the eee_init function.
165 * In case of failure it will return an error.
166 */
167 priv->eee_enabled = sxgbe_eee_init(priv);
168 if (!priv->eee_enabled)
169 return -EOPNOTSUPP;
170
171 /* Do not change tx_lpi_timer in case of failure */
172 priv->tx_lpi_timer = edata->tx_lpi_timer;
173 }
174
175 return phy_ethtool_set_eee(priv->phydev, edata);
176}
177
178static void sxgbe_getdrvinfo(struct net_device *dev,
179 struct ethtool_drvinfo *info)
180{
181 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
182 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
183}
184
185static int sxgbe_getsettings(struct net_device *dev,
186 struct ethtool_cmd *cmd)
187{
188 struct sxgbe_priv_data *priv = netdev_priv(dev);
189
190 if (priv->phydev)
191 return phy_ethtool_gset(priv->phydev, cmd);
192
193 return -EOPNOTSUPP;
194}
195
196static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
197{
198 struct sxgbe_priv_data *priv = netdev_priv(dev);
199
200 if (priv->phydev)
201 return phy_ethtool_sset(priv->phydev, cmd);
202
203 return -EOPNOTSUPP;
204}
205
206static u32 sxgbe_getmsglevel(struct net_device *dev)
207{
208 struct sxgbe_priv_data *priv = netdev_priv(dev);
209 return priv->msg_enable;
210}
211
212static void sxgbe_setmsglevel(struct net_device *dev, u32 level)
213{
214 struct sxgbe_priv_data *priv = netdev_priv(dev);
215 priv->msg_enable = level;
216}
217
218static void sxgbe_get_strings(struct net_device *dev, u32 stringset, u8 *data)
219{
220 int i;
221 u8 *p = data;
222
223 switch (stringset) {
224 case ETH_SS_STATS:
225 for (i = 0; i < SXGBE_STATS_LEN; i++) {
226 memcpy(p, sxgbe_gstrings_stats[i].stat_string,
227 ETH_GSTRING_LEN);
228 p += ETH_GSTRING_LEN;
229 }
230 break;
231 default:
232 WARN_ON(1);
233 break;
234 }
235}
236
237static int sxgbe_get_sset_count(struct net_device *netdev, int sset)
238{
239 int len;
240
241 switch (sset) {
242 case ETH_SS_STATS:
243 len = SXGBE_STATS_LEN;
244 return len;
245 default:
246 return -EINVAL;
247 }
248}
249
250static void sxgbe_get_ethtool_stats(struct net_device *dev,
251 struct ethtool_stats *dummy, u64 *data)
252{
253 struct sxgbe_priv_data *priv = netdev_priv(dev);
254 int i;
255 char *p;
256
257 if (priv->eee_enabled) {
258 int val = phy_get_eee_err(priv->phydev);
259
260 if (val)
261 priv->xstats.eee_wakeup_error_n = val;
262 }
263
264 for (i = 0; i < SXGBE_STATS_LEN; i++) {
265 p = (char *)priv + sxgbe_gstrings_stats[i].stat_offset;
266 data[i] = (sxgbe_gstrings_stats[i].sizeof_stat == sizeof(u64))
267 ? (*(u64 *)p) : (*(u32 *)p);
268 }
269}
270
271static void sxgbe_get_channels(struct net_device *dev,
272 struct ethtool_channels *channel)
273{
274 channel->max_rx = SXGBE_MAX_RX_CHANNELS;
275 channel->max_tx = SXGBE_MAX_TX_CHANNELS;
276 channel->rx_count = SXGBE_RX_QUEUES;
277 channel->tx_count = SXGBE_TX_QUEUES;
278}
279
280static u32 sxgbe_riwt2usec(u32 riwt, struct sxgbe_priv_data *priv)
281{
282 unsigned long clk = clk_get_rate(priv->sxgbe_clk);
283
284 if (!clk)
285 return 0;
286
287 return (riwt * 256) / (clk / 1000000);
288}
289
290static u32 sxgbe_usec2riwt(u32 usec, struct sxgbe_priv_data *priv)
291{
292 unsigned long clk = clk_get_rate(priv->sxgbe_clk);
293
294 if (!clk)
295 return 0;
296
297 return (usec * (clk / 1000000)) / 256;
298}
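
A quick sanity check of the two conversions above, assuming clk_get_rate() reports 250 MHz (one RIWT unit is 256 CSR clock cycles):

	/* sxgbe_usec2riwt(100) = (100 * 250) / 256 = 97 RIWT units
	 * sxgbe_riwt2usec(97)  = (97 * 256) / 250  = 99 us; the integer
	 * math means a round trip can lose a microsecond or two
	 */
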
299
300static int sxgbe_get_coalesce(struct net_device *dev,
301 struct ethtool_coalesce *ec)
302{
303 struct sxgbe_priv_data *priv = netdev_priv(dev);
304
305 if (priv->use_riwt)
306 ec->rx_coalesce_usecs = sxgbe_riwt2usec(priv->rx_riwt, priv);
307
308 return 0;
309}
310
311static int sxgbe_set_coalesce(struct net_device *dev,
312 struct ethtool_coalesce *ec)
313{
314 struct sxgbe_priv_data *priv = netdev_priv(dev);
315 unsigned int rx_riwt;
316
317 if (!ec->rx_coalesce_usecs)
318 return -EINVAL;
319
320 rx_riwt = sxgbe_usec2riwt(ec->rx_coalesce_usecs, priv);
321
322 if ((rx_riwt > SXGBE_MAX_DMA_RIWT) || (rx_riwt < SXGBE_MIN_DMA_RIWT))
323 return -EINVAL;
324 else if (!priv->use_riwt)
325 return -EOPNOTSUPP;
326
327 priv->rx_riwt = rx_riwt;
328 priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
329
330 return 0;
331}
332
333static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
334 struct ethtool_rxnfc *cmd)
335{
336 cmd->data = 0;
337
338 /* Report default options for RSS on sxgbe */
339 switch (cmd->flow_type) {
340 case TCP_V4_FLOW:
341 case UDP_V4_FLOW:
342 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
343 case SCTP_V4_FLOW:
344 case AH_ESP_V4_FLOW:
345 case AH_V4_FLOW:
346 case ESP_V4_FLOW:
347 case IPV4_FLOW:
348 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
349 break;
350 case TCP_V6_FLOW:
351 case UDP_V6_FLOW:
352 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
353 case SCTP_V6_FLOW:
354 case AH_ESP_V6_FLOW:
355 case AH_V6_FLOW:
356 case ESP_V6_FLOW:
357 case IPV6_FLOW:
358 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
359 break;
360 default:
361 return -EINVAL;
362 }
363
364 return 0;
365}
366
367static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
368 u32 *rule_locs)
369{
370 struct sxgbe_priv_data *priv = netdev_priv(dev);
371 int ret = -EOPNOTSUPP;
372
373 switch (cmd->cmd) {
374 case ETHTOOL_GRXFH:
375 ret = sxgbe_get_rss_hash_opts(priv, cmd);
376 break;
377 default:
378 break;
379 }
380
381 return ret;
382}
383
384static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
385 struct ethtool_rxnfc *cmd)
386{
387 u32 reg_val = 0;
388
389 /* RSS does not support anything other than hashing
390 * to queues on src and dst IPs and ports
391 */
392 if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST |
393 RXH_L4_B_0_1 | RXH_L4_B_2_3))
394 return -EINVAL;
395
396 switch (cmd->flow_type) {
397 case TCP_V4_FLOW:
398 case TCP_V6_FLOW:
399 if (!(cmd->data & RXH_IP_SRC) ||
400 !(cmd->data & RXH_IP_DST) ||
401 !(cmd->data & RXH_L4_B_0_1) ||
402 !(cmd->data & RXH_L4_B_2_3))
403 return -EINVAL;
404 reg_val = SXGBE_CORE_RSS_CTL_TCP4TE;
405 break;
406 case UDP_V4_FLOW:
407 case UDP_V6_FLOW:
408 if (!(cmd->data & RXH_IP_SRC) ||
409 !(cmd->data & RXH_IP_DST) ||
410 !(cmd->data & RXH_L4_B_0_1) ||
411 !(cmd->data & RXH_L4_B_2_3))
412 return -EINVAL;
413 reg_val = SXGBE_CORE_RSS_CTL_UDP4TE;
414 break;
415 case SCTP_V4_FLOW:
416 case AH_ESP_V4_FLOW:
417 case AH_V4_FLOW:
418 case ESP_V4_FLOW:
419 case AH_ESP_V6_FLOW:
420 case AH_V6_FLOW:
421 case ESP_V6_FLOW:
422 case SCTP_V6_FLOW:
423 case IPV4_FLOW:
424 case IPV6_FLOW:
425 if (!(cmd->data & RXH_IP_SRC) ||
426 !(cmd->data & RXH_IP_DST) ||
427 (cmd->data & RXH_L4_B_0_1) ||
428 (cmd->data & RXH_L4_B_2_3))
429 return -EINVAL;
430 reg_val = SXGBE_CORE_RSS_CTL_IP2TE;
431 break;
432 default:
433 return -EINVAL;
434 }
435
436 /* Read SXGBE RSS control register and update */
437 reg_val |= readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
438 writel(reg_val, priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
439 readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
440
441 return 0;
442}
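
For reference, this handler is reached from userspace through ethtool's rx-flow-hash interface (ETHTOOL_SRXFH); e.g. requesting IP src/dst plus both L4 port halves for TCP/IPv4 ("ethX" is a placeholder):

	ethtool -N ethX rx-flow-hash tcp4 sdfn

which corresponds to RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 and lands in the TCP_V4_FLOW case above.
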
443
444static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
445{
446 struct sxgbe_priv_data *priv = netdev_priv(dev);
447 int ret = -EOPNOTSUPP;
448
449 switch (cmd->cmd) {
450 case ETHTOOL_SRXFH:
451 ret = sxgbe_set_rss_hash_opt(priv, cmd);
452 break;
453 default:
454 break;
455 }
456
457 return ret;
458}
459
460static void sxgbe_get_regs(struct net_device *dev,
461 struct ethtool_regs *regs, void *space)
462{
463 struct sxgbe_priv_data *priv = netdev_priv(dev);
464 u32 *reg_space = (u32 *)space;
465 int reg_offset;
466 int reg_ix = 0;
467 void __iomem *ioaddr = priv->ioaddr;
468
469 memset(reg_space, 0x0, REG_SPACE_SIZE);
470
471 /* MAC registers */
472 for (reg_offset = START_MAC_REG_OFFSET;
473 reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
474 reg_space[reg_ix] = readl(ioaddr + reg_offset);
475 reg_ix++;
476 }
477
478 /* MTL registers */
479 for (reg_offset = START_MTL_REG_OFFSET;
480 reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
481 reg_space[reg_ix] = readl(ioaddr + reg_offset);
482 reg_ix++;
483 }
484
485 /* DMA registers */
486 for (reg_offset = START_DMA_REG_OFFSET;
487 reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
488 reg_space[reg_ix] = readl(ioaddr + reg_offset);
489 reg_ix++;
490 }
491
492 BUG_ON(reg_ix * 4 > REG_SPACE_SIZE);
493}
494
495static int sxgbe_get_regs_len(struct net_device *dev)
496{
497 return REG_SPACE_SIZE;
498}
499
500static const struct ethtool_ops sxgbe_ethtool_ops = {
501 .get_drvinfo = sxgbe_getdrvinfo,
502 .get_settings = sxgbe_getsettings,
503 .set_settings = sxgbe_setsettings,
504 .get_msglevel = sxgbe_getmsglevel,
505 .set_msglevel = sxgbe_setmsglevel,
506 .get_link = ethtool_op_get_link,
507 .get_strings = sxgbe_get_strings,
508 .get_ethtool_stats = sxgbe_get_ethtool_stats,
509 .get_sset_count = sxgbe_get_sset_count,
510 .get_channels = sxgbe_get_channels,
511 .get_coalesce = sxgbe_get_coalesce,
512 .set_coalesce = sxgbe_set_coalesce,
513 .get_rxnfc = sxgbe_get_rxnfc,
514 .set_rxnfc = sxgbe_set_rxnfc,
515 .get_regs = sxgbe_get_regs,
516 .get_regs_len = sxgbe_get_regs_len,
517 .get_eee = sxgbe_get_eee,
518 .set_eee = sxgbe_set_eee,
519};
520
521void sxgbe_set_ethtool_ops(struct net_device *netdev)
522{
523 SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops);
524}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
new file mode 100644
index 000000000000..1869d4c6e454
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -0,0 +1,2311 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/clk.h>
16#include <linux/crc32.h>
17#include <linux/dma-mapping.h>
18#include <linux/etherdevice.h>
19#include <linux/ethtool.h>
20#include <linux/if.h>
21#include <linux/if_ether.h>
22#include <linux/if_vlan.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/ip.h>
26#include <linux/kernel.h>
27#include <linux/mii.h>
28#include <linux/module.h>
29#include <linux/net_tstamp.h>
30#include <linux/netdevice.h>
31#include <linux/phy.h>
32#include <linux/platform_device.h>
33#include <linux/prefetch.h>
34#include <linux/skbuff.h>
35#include <linux/slab.h>
36#include <linux/tcp.h>
37#include <linux/sxgbe_platform.h>
38
39#include "sxgbe_common.h"
40#include "sxgbe_desc.h"
41#include "sxgbe_dma.h"
42#include "sxgbe_mtl.h"
43#include "sxgbe_reg.h"
44
45#define SXGBE_ALIGN(x) L1_CACHE_ALIGN(x)
46#define JUMBO_LEN 9000
47
48/* Module parameters */
49#define TX_TIMEO 5000
50#define DMA_TX_SIZE 512
51#define DMA_RX_SIZE 1024
52#define TC_DEFAULT 64
53#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
54/* The default timer value, as per the sxgbe specification, is 1 sec (1000 ms) */
55#define SXGBE_DEFAULT_LPI_TIMER 1000
56
57static int debug = -1;
58static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;
59
60module_param(eee_timer, int, S_IRUGO | S_IWUSR);
61
62module_param(debug, int, S_IRUGO | S_IWUSR);
63static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
64 NETIF_MSG_LINK | NETIF_MSG_IFUP |
65 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
66
67static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
68static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
69static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);
70
71#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
72
73#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
74
75/**
76 * sxgbe_verify_args - verify the driver parameters.
77 * Description: it verifies whether any wrong parameter was passed to the driver.
78 * Note that wrong parameters are replaced with the default values.
79 */
80static void sxgbe_verify_args(void)
81{
82 if (unlikely(eee_timer < 0))
83 eee_timer = SXGBE_DEFAULT_LPI_TIMER;
84}
85
86static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
87{
88	/* Check and enter LPI mode */
89 if (!priv->tx_path_in_lpi_mode)
90 priv->hw->mac->set_eee_mode(priv->ioaddr);
91}
92
93void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
94{
95	/* Exit and disable EEE in case we are in LPI state. */
96 priv->hw->mac->reset_eee_mode(priv->ioaddr);
97 del_timer_sync(&priv->eee_ctrl_timer);
98 priv->tx_path_in_lpi_mode = false;
99}
100
101/**
102 * sxgbe_eee_ctrl_timer
103 * @arg : data hook
104 * Description:
105 * If there is no data transfer and if we are not in LPI state,
106 * then MAC Transmitter can be moved to LPI state.
107 */
108static void sxgbe_eee_ctrl_timer(unsigned long arg)
109{
110 struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg;
111
112 sxgbe_enable_eee_mode(priv);
113 mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
114}
115
116/**
117 * sxgbe_eee_init
118 * @priv: private device pointer
119 * Description:
120 * If EEE support was enabled while configuring the driver, and if the
121 * MAC actually supports EEE (from the HW cap reg) and the PHY can also
122 * manage it, enable the LPI state and start the timer to verify
123 * whether the tx path can enter the LPI state.
124 */
125bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
126{
127 bool ret = false;
128
129 /* MAC core supports the EEE feature. */
130 if (priv->hw_cap.eee) {
131 /* Check if the PHY supports EEE */
132 if (phy_init_eee(priv->phydev, 1))
133 return false;
134
135 priv->eee_active = 1;
136 init_timer(&priv->eee_ctrl_timer);
137 priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer;
138 priv->eee_ctrl_timer.data = (unsigned long)priv;
139 priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
140 add_timer(&priv->eee_ctrl_timer);
141
142 priv->hw->mac->set_eee_timer(priv->ioaddr,
143 SXGBE_DEFAULT_LPI_TIMER,
144 priv->tx_lpi_timer);
145
146 pr_info("Energy-Efficient Ethernet initialized\n");
147
148 ret = true;
149 }
150
151 return ret;
152}
153
154static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
155{
156	/* When EEE has already been initialised, we have to modify
157	 * the PLS bit in the LPI ctrl & status reg according to the
158	 * PHY link status.
159 */
160 if (priv->eee_enabled)
161 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
162}
163
164/**
165 * sxgbe_clk_csr_set - dynamically set the MDC clock
166 * @priv: driver private structure
167 * Description: this is to dynamically set the MDC clock according to the csr
168 * clock input.
169 */
170static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
171{
172 u32 clk_rate = clk_get_rate(priv->sxgbe_clk);
173
174 /* assign the proper divider, this will be used during
175 * mdio communication
176 */
177 if (clk_rate < SXGBE_CSR_F_150M)
178 priv->clk_csr = SXGBE_CSR_100_150M;
179 else if (clk_rate <= SXGBE_CSR_F_250M)
180 priv->clk_csr = SXGBE_CSR_150_250M;
181 else if (clk_rate <= SXGBE_CSR_F_300M)
182 priv->clk_csr = SXGBE_CSR_250_300M;
183 else if (clk_rate <= SXGBE_CSR_F_350M)
184 priv->clk_csr = SXGBE_CSR_300_350M;
185 else if (clk_rate <= SXGBE_CSR_F_400M)
186 priv->clk_csr = SXGBE_CSR_350_400M;
187 else if (clk_rate <= SXGBE_CSR_F_500M)
188 priv->clk_csr = SXGBE_CSR_400_500M;
189}
190
191/* minimum number of free TX descriptors required to wake up TX process */
192#define SXGBE_TX_THRESH(x) (x->dma_tx_size/4)
193
194static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
195{
196 return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
197}
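
cur_tx and dirty_tx are free-running counters, so the expression above stays valid across wrap-around and reserves one slot to tell a full ring from an empty one. A worked example:

	/* tx_qsize = 512, dirty_tx = 650, cur_tx = 700:
	 * avail = 650 + 512 - 700 - 1 = 461 free descriptors
	 */
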
198
199/**
200 * sxgbe_adjust_link
201 * @dev: net device structure
202 * Description: it adjusts the link parameters.
203 */
204static void sxgbe_adjust_link(struct net_device *dev)
205{
206 struct sxgbe_priv_data *priv = netdev_priv(dev);
207 struct phy_device *phydev = priv->phydev;
208 u8 new_state = 0;
209 u8 speed = 0xff;
210
211 if (!phydev)
212 return;
213
214	/* SXGBE does not support auto-negotiation or half-duplex
215	 * mode, so duplex changes are not handled in this function;
216	 * only speed and link status are.
217 */
218 if (phydev->link) {
219 if (phydev->speed != priv->speed) {
220 new_state = 1;
221 switch (phydev->speed) {
222 case SPEED_10000:
223 speed = SXGBE_SPEED_10G;
224 break;
225 case SPEED_2500:
226 speed = SXGBE_SPEED_2_5G;
227 break;
228 case SPEED_1000:
229 speed = SXGBE_SPEED_1G;
230 break;
231 default:
232 netif_err(priv, link, dev,
233 "Speed (%d) not supported\n",
234 phydev->speed);
235 }
236
237 priv->speed = phydev->speed;
238 priv->hw->mac->set_speed(priv->ioaddr, speed);
239 }
240
241 if (!priv->oldlink) {
242 new_state = 1;
243 priv->oldlink = 1;
244 }
245 } else if (priv->oldlink) {
246 new_state = 1;
247 priv->oldlink = 0;
248 priv->speed = SPEED_UNKNOWN;
249 }
250
251	if (new_state && netif_msg_link(priv))
252 phy_print_status(phydev);
253
254 /* Alter the MAC settings for EEE */
255 sxgbe_eee_adjust(priv);
256}
257
258/**
259 * sxgbe_init_phy - PHY initialization
260 * @dev: net device structure
261 * Description: it initializes the driver's PHY state, and attaches the PHY
262 * to the mac driver.
263 * Return value:
264 * 0 on success
265 */
266static int sxgbe_init_phy(struct net_device *ndev)
267{
268 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
269 char bus_id[MII_BUS_ID_SIZE];
270 struct phy_device *phydev;
271 struct sxgbe_priv_data *priv = netdev_priv(ndev);
272 int phy_iface = priv->plat->interface;
273
274 /* assign default link status */
275 priv->oldlink = 0;
276 priv->speed = SPEED_UNKNOWN;
277 priv->oldduplex = DUPLEX_UNKNOWN;
278
279 if (priv->plat->phy_bus_name)
280 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
281 priv->plat->phy_bus_name, priv->plat->bus_id);
282 else
283 snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
284 priv->plat->bus_id);
285
286 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
287 priv->plat->phy_addr);
288 netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);
289
290 phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);
291
292 if (IS_ERR(phydev)) {
293 netdev_err(ndev, "Could not attach to PHY\n");
294 return PTR_ERR(phydev);
295 }
296
297 /* Stop Advertising 1000BASE Capability if interface is not GMII */
298 if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
299 (phy_iface == PHY_INTERFACE_MODE_RMII))
300 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
301 SUPPORTED_1000baseT_Full);
302 if (phydev->phy_id == 0) {
303 phy_disconnect(phydev);
304 return -ENODEV;
305 }
306
307 netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
308 __func__, phydev->phy_id, phydev->link);
309
310 /* save phy device in private structure */
311 priv->phydev = phydev;
312
313 return 0;
314}
315
316/**
317 * sxgbe_clear_descriptors: clear descriptors
318 * @priv: driver private structure
319 * Description: this function is called to clear the tx and rx descriptors
320 * whether basic or extended descriptors are used.
321 */
322static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
323{
324 int i, j;
325 unsigned int txsize = priv->dma_tx_size;
326 unsigned int rxsize = priv->dma_rx_size;
327
328 /* Clear the Rx/Tx descriptors */
329 for (j = 0; j < SXGBE_RX_QUEUES; j++) {
330 for (i = 0; i < rxsize; i++)
331 priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
332 priv->use_riwt, priv->mode,
333 (i == rxsize - 1));
334 }
335
336 for (j = 0; j < SXGBE_TX_QUEUES; j++) {
337 for (i = 0; i < txsize; i++)
338 priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
339 }
340}
341
342static int sxgbe_init_rx_buffers(struct net_device *dev,
343 struct sxgbe_rx_norm_desc *p, int i,
344 unsigned int dma_buf_sz,
345 struct sxgbe_rx_queue *rx_ring)
346{
347 struct sxgbe_priv_data *priv = netdev_priv(dev);
348 struct sk_buff *skb;
349
350 skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
351 if (!skb)
352 return -ENOMEM;
353
354 rx_ring->rx_skbuff[i] = skb;
355 rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
356 dma_buf_sz, DMA_FROM_DEVICE);
357
358 if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
359 netdev_err(dev, "%s: DMA mapping error\n", __func__);
360 dev_kfree_skb_any(skb);
361 return -EINVAL;
362 }
363
364 p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];
365
366 return 0;
367}
368/**
369 * init_tx_ring - init the TX descriptor ring
370 * @dev: net device structure
371 * @tx_ring: ring to be initialised
372 * @tx_rsize: ring size
373 * Description: this function initializes the DMA TX descriptor ring
374 */
375static int init_tx_ring(struct device *dev, u8 queue_no,
376 struct sxgbe_tx_queue *tx_ring, int tx_rsize)
377{
378	/* TX ring is not allocated */
379 if (!tx_ring) {
380 dev_err(dev, "No memory for TX queue of SXGBE\n");
381 return -ENOMEM;
382 }
383
384 /* allocate memory for TX descriptors */
385 tx_ring->dma_tx = dma_zalloc_coherent(dev,
386 tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
387 &tx_ring->dma_tx_phy, GFP_KERNEL);
388 if (!tx_ring->dma_tx)
389 return -ENOMEM;
390
391 /* allocate memory for TX skbuff array */
392 tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
393 sizeof(dma_addr_t), GFP_KERNEL);
394 if (!tx_ring->tx_skbuff_dma)
395 goto dmamem_err;
396
397 tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
398 sizeof(struct sk_buff *), GFP_KERNEL);
399
400 if (!tx_ring->tx_skbuff)
401 goto dmamem_err;
402
403 /* assign queue number */
404 tx_ring->queue_no = queue_no;
405
406	/* initialise counters */
407 tx_ring->dirty_tx = 0;
408 tx_ring->cur_tx = 0;
409
410	/* initialise TX queue lock */
411 spin_lock_init(&tx_ring->tx_lock);
412
413 return 0;
414
415dmamem_err:
416 dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
417 tx_ring->dma_tx, tx_ring->dma_tx_phy);
418 return -ENOMEM;
419}
420
421/**
422 * free_rx_ring - free the RX descriptor ring
423 * @dev: net device structure
424 * @rx_ring: ring to be freed
425 * @rx_rsize: ring size
426 * Description: this function frees the DMA RX descriptor ring
427 */
428void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
429 int rx_rsize)
430{
431 dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
432 rx_ring->dma_rx, rx_ring->dma_rx_phy);
433 kfree(rx_ring->rx_skbuff_dma);
434 kfree(rx_ring->rx_skbuff);
435}
436
437/**
438 * init_rx_ring - init the RX descriptor ring
439 * @dev: net device structure
440 * @rx_ring: ring to be initialised
441 * @rx_rsize: ring size
442 * Description: this function initializes the DMA RX descriptor ring
443 */
444static int init_rx_ring(struct net_device *dev, u8 queue_no,
445 struct sxgbe_rx_queue *rx_ring, int rx_rsize)
446{
447 struct sxgbe_priv_data *priv = netdev_priv(dev);
448 int desc_index;
449 unsigned int bfsize = 0;
450 unsigned int ret = 0;
451
452 /* Set the max buffer size according to the MTU. */
453 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
454
455 netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);
456
457	/* RX ring is not allocated */
458 if (rx_ring == NULL) {
459 netdev_err(dev, "No memory for RX queue\n");
460 goto error;
461 }
462
463 /* assign queue number */
464 rx_ring->queue_no = queue_no;
465
466 /* allocate memory for RX descriptors */
467 rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
468 rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
469 &rx_ring->dma_rx_phy, GFP_KERNEL);
470
471 if (rx_ring->dma_rx == NULL)
472 goto error;
473
474 /* allocate memory for RX skbuff array */
475 rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
476 sizeof(dma_addr_t), GFP_KERNEL);
477 if (rx_ring->rx_skbuff_dma == NULL)
478 goto dmamem_err;
479
480 rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
481 sizeof(struct sk_buff *), GFP_KERNEL);
482 if (rx_ring->rx_skbuff == NULL)
483 goto rxbuff_err;
484
485 /* initialise the buffers */
486 for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
487 struct sxgbe_rx_norm_desc *p;
488 p = rx_ring->dma_rx + desc_index;
489 ret = sxgbe_init_rx_buffers(dev, p, desc_index,
490 bfsize, rx_ring);
491 if (ret)
492 goto err_init_rx_buffers;
493 }
494
495	/* initialise counters */
496 rx_ring->cur_rx = 0;
497 rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
498 priv->dma_buf_sz = bfsize;
499
500 return 0;
501
502err_init_rx_buffers:
503 while (--desc_index >= 0)
504 free_rx_ring(priv->device, rx_ring, desc_index);
505 kfree(rx_ring->rx_skbuff);
506rxbuff_err:
507 kfree(rx_ring->rx_skbuff_dma);
508dmamem_err:
509 dma_free_coherent(priv->device,
510 rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
511 rx_ring->dma_rx, rx_ring->dma_rx_phy);
512error:
513 return -ENOMEM;
514}
515/**
516 * free_tx_ring - free the TX descriptor ring
517 * @dev: net device structure
518 * @tx_ring: ring to be freed
519 * @tx_rsize: ring size
520 * Description: this function frees the DMA TX descriptor ring
521 */
522void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
523 int tx_rsize)
524{
525 dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
526 tx_ring->dma_tx, tx_ring->dma_tx_phy);
527}
528
529/**
530 * init_dma_desc_rings - init the RX/TX descriptor rings
531 * @dev: net device structure
532 * Description: this function initializes the DMA RX/TX descriptors
533 * and allocates the socket buffers. It supports the chained and ring
534 * modes.
535 */
536static int init_dma_desc_rings(struct net_device *netd)
537{
538 int queue_num, ret;
539 struct sxgbe_priv_data *priv = netdev_priv(netd);
540 int tx_rsize = priv->dma_tx_size;
541 int rx_rsize = priv->dma_rx_size;
542
543 /* Allocate memory for queue structures and TX descs */
544 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
545 ret = init_tx_ring(priv->device, queue_num,
546 priv->txq[queue_num], tx_rsize);
547 if (ret) {
548 dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
549 goto txalloc_err;
550 }
551
552		/* save the private pointer in each ring; this
553		 * pointer is needed when cleaning the TX queue
554 */
555 priv->txq[queue_num]->priv_ptr = priv;
556 }
557
558 /* Allocate memory for queue structures and RX descs */
559 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
560 ret = init_rx_ring(netd, queue_num,
561 priv->rxq[queue_num], rx_rsize);
562 if (ret) {
563 netdev_err(netd, "RX DMA ring allocation failed!!\n");
564 goto rxalloc_err;
565 }
566
567		/* save the private pointer in each ring; this
568		 * pointer is needed when cleaning the RX queue
569 */
570 priv->rxq[queue_num]->priv_ptr = priv;
571 }
572
573 sxgbe_clear_descriptors(priv);
574
575 return 0;
576
577txalloc_err:
578 while (queue_num--)
579 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
580 return ret;
581
582rxalloc_err:
583 while (queue_num--)
584 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
585 return ret;
586}
587
588static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
589{
590 int dma_desc;
591 struct sxgbe_priv_data *priv = txqueue->priv_ptr;
592 int tx_rsize = priv->dma_tx_size;
593
594 for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
595 struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;
596
597 if (txqueue->tx_skbuff_dma[dma_desc])
598 dma_unmap_single(priv->device,
599 txqueue->tx_skbuff_dma[dma_desc],
600 priv->hw->desc->get_tx_len(tdesc),
601 DMA_TO_DEVICE);
602
603 dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
604 txqueue->tx_skbuff[dma_desc] = NULL;
605 txqueue->tx_skbuff_dma[dma_desc] = 0;
606 }
607}
608
609
610static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
611{
612 int queue_num;
613
614 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
615 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
616 tx_free_ring_skbufs(tqueue);
617 }
618}
619
620static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
621{
622 int queue_num;
623 int tx_rsize = priv->dma_tx_size;
624 int rx_rsize = priv->dma_rx_size;
625
626 /* Release the DMA TX buffers */
627 dma_free_tx_skbufs(priv);
628
629 /* Release the TX ring memory also */
630 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
631 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
632 }
633
634 /* Release the RX ring memory also */
635 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
636 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
637 }
638}
639
640static int txring_mem_alloc(struct sxgbe_priv_data *priv)
641{
642 int queue_num;
643
644 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
645 priv->txq[queue_num] = devm_kmalloc(priv->device,
646 sizeof(struct sxgbe_tx_queue), GFP_KERNEL);
647 if (!priv->txq[queue_num])
648 return -ENOMEM;
649 }
650
651 return 0;
652}
653
654static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
655{
656 int queue_num;
657
658 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
659 priv->rxq[queue_num] = devm_kmalloc(priv->device,
660 sizeof(struct sxgbe_rx_queue), GFP_KERNEL);
661 if (!priv->rxq[queue_num])
662 return -ENOMEM;
663 }
664
665 return 0;
666}
667
668/**
669 * sxgbe_mtl_operation_mode - HW MTL operation mode
670 * @priv: driver private structure
671 * Description: it sets the MTL operation mode: tx/rx MTL thresholds
672 * or Store-And-Forward capability.
673 */
674static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
675{
676 int queue_num;
677
678 /* TX/RX threshold control */
679 if (likely(priv->plat->force_sf_dma_mode)) {
680 /* set TC mode for TX QUEUES */
681 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
682 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
683 SXGBE_MTL_SFMODE);
684 priv->tx_tc = SXGBE_MTL_SFMODE;
685
686 /* set TC mode for RX QUEUES */
687 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
688 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
689 SXGBE_MTL_SFMODE);
690 priv->rx_tc = SXGBE_MTL_SFMODE;
691 } else if (unlikely(priv->plat->force_thresh_dma_mode)) {
692 /* set TC mode for TX QUEUES */
693 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
694 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
695 priv->tx_tc);
696 /* set TC mode for RX QUEUES */
697 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
698 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
699 priv->rx_tc);
700 } else {
701 pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
702 }
703}
704
705/**
706 * sxgbe_tx_queue_clean:
707 * @priv: driver private structure
708 * Description: it reclaims resources after transmission completes.
709 */
710static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
711{
712 struct sxgbe_priv_data *priv = tqueue->priv_ptr;
713 unsigned int tx_rsize = priv->dma_tx_size;
714 struct netdev_queue *dev_txq;
715 u8 queue_no = tqueue->queue_no;
716
717 dev_txq = netdev_get_tx_queue(priv->dev, queue_no);
718
719 spin_lock(&tqueue->tx_lock);
720
721 priv->xstats.tx_clean++;
722 while (tqueue->dirty_tx != tqueue->cur_tx) {
723 unsigned int entry = tqueue->dirty_tx % tx_rsize;
724 struct sk_buff *skb = tqueue->tx_skbuff[entry];
725 struct sxgbe_tx_norm_desc *p;
726
727 p = tqueue->dma_tx + entry;
728
729 /* Check if the descriptor is owned by the DMA. */
730 if (priv->hw->desc->get_tx_owner(p))
731 break;
732
733 if (netif_msg_tx_done(priv))
734 pr_debug("%s: curr %d, dirty %d\n",
735 __func__, tqueue->cur_tx, tqueue->dirty_tx);
736
737 if (likely(tqueue->tx_skbuff_dma[entry])) {
738 dma_unmap_single(priv->device,
739 tqueue->tx_skbuff_dma[entry],
740 priv->hw->desc->get_tx_len(p),
741 DMA_TO_DEVICE);
742 tqueue->tx_skbuff_dma[entry] = 0;
743 }
744
745 if (likely(skb)) {
746 dev_kfree_skb(skb);
747 tqueue->tx_skbuff[entry] = NULL;
748 }
749
750 priv->hw->desc->release_tx_desc(p);
751
752 tqueue->dirty_tx++;
753 }
754
755 /* wake up queue */
756 if (unlikely(netif_tx_queue_stopped(dev_txq) &&
757 sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
758 netif_tx_lock(priv->dev);
759 if (netif_tx_queue_stopped(dev_txq) &&
760 sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
761 if (netif_msg_tx_done(priv))
762 pr_debug("%s: restart transmit\n", __func__);
763 netif_tx_wake_queue(dev_txq);
764 }
765 netif_tx_unlock(priv->dev);
766 }
767
768 spin_unlock(&tqueue->tx_lock);
769}
770
771/**
772 * sxgbe_tx_clean:
773 * @priv: driver private structure
774 * Description: it reclaims resources after transmission completes.
775 */
776static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
777{
778 u8 queue_num;
779
780 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
781 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
782
783 sxgbe_tx_queue_clean(tqueue);
784 }
785
786 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
787 sxgbe_enable_eee_mode(priv);
788 mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
789 }
790}
791
792/**
793 * sxgbe_restart_tx_queue: irq tx error mng function
794 * @priv: driver private structure
795 * Description: it cleans the descriptors and restarts the transmission
796 * in case of errors.
797 */
798static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
799{
800 struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
801 struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
802 queue_num);
803
804 /* stop the queue */
805 netif_tx_stop_queue(dev_txq);
806
807 /* stop the tx dma */
808 priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);
809
810 /* free the skbuffs of the ring */
811 tx_free_ring_skbufs(tx_ring);
812
813	/* initialise counters */
814 tx_ring->cur_tx = 0;
815 tx_ring->dirty_tx = 0;
816
817 /* start the tx dma */
818 priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);
819
820 priv->dev->stats.tx_errors++;
821
822 /* wakeup the queue */
823 netif_tx_wake_queue(dev_txq);
824}
825
826/**
827 * sxgbe_reset_all_tx_queues: irq tx error mng function
828 * @priv: driver private structure
829 * Description: it cleans all the descriptors and
830 * restarts the transmission on all queues in case of errors.
831 */
832static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
833{
834 int queue_num;
835
836	/* On a TX timeout of the net device, resetting all queues
837	 * may not be the proper approach; revisit this later if needed
838 */
839 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
840 sxgbe_restart_tx_queue(priv, queue_num);
841}
842
843/**
844 * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register.
845 * @priv: driver private structure
846 * Description:
847 * newer chip generations have a register that indicates the
848 * presence of optional features/functions.
849 * It can also be used to override the value passed through the
850 * platform, which is necessary for old MAC10/100 and GMAC chips.
851 */
852static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
853{
854 int rval = 0;
855 struct sxgbe_hw_features *features = &priv->hw_cap;
856
857 /* Read First Capability Register CAP[0] */
858 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
859 if (rval) {
860 features->pmt_remote_wake_up =
861 SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
862 features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
863 features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
864 features->tx_csum_offload =
865 SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
866 features->rx_csum_offload =
867 SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
868 features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
869 features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
870 features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
871 features->eee = SXGBE_HW_FEAT_EEE(rval);
872 }
873
874 /* Read First Capability Register CAP[1] */
875 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
876 if (rval) {
877 features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
878 features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
879 features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
880 features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
881 features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
882 features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
883 features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
884 features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
885 features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
886 features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
887 }
888
889 /* Read First Capability Register CAP[2] */
890 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
891 if (rval) {
892 features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
893 features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
894 features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
895 features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
896 features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
897 features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
898 }
899
900 return rval;
901}
902
903/**
904 * sxgbe_check_ether_addr: check if the MAC addr is valid
905 * @priv: driver private structure
906 * Description:
907 * it verifies that the MAC address is valid; if it is not,
908 * a random MAC address is generated
909 */
910static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
911{
912 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
913 priv->hw->mac->get_umac_addr((void __iomem *)
914 priv->ioaddr,
915 priv->dev->dev_addr, 0);
916 if (!is_valid_ether_addr(priv->dev->dev_addr))
917 eth_hw_addr_random(priv->dev);
918 }
919 dev_info(priv->device, "device MAC address %pM\n",
920 priv->dev->dev_addr);
921}
922
923/**
924 * sxgbe_init_dma_engine: DMA init.
925 * @priv: driver private structure
926 * Description:
927 * It inits the DMA invoking the specific SXGBE callback.
928 * Some DMA parameters can be passed from the platform;
929 * if these are not passed, defaults are kept for the MAC.
930 */
931static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
932{
933 int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
934 int queue_num;
935
936 if (priv->plat->dma_cfg) {
937 pbl = priv->plat->dma_cfg->pbl;
938 fixed_burst = priv->plat->dma_cfg->fixed_burst;
939 burst_map = priv->plat->dma_cfg->burst_map;
940 }
941
942 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
943 priv->hw->dma->cha_init(priv->ioaddr, queue_num,
944 fixed_burst, pbl,
945 (priv->txq[queue_num])->dma_tx_phy,
946 (priv->rxq[queue_num])->dma_rx_phy,
947 priv->dma_tx_size, priv->dma_rx_size);
948
949 return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
950}
951
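The pbl, fixed_burst and burst_map values come from priv->plat->dma_cfg, i.e. from platform data, with compile-time defaults as a fallback. A standalone sketch of that fallback logic; the struct shape mirrors the fields dereferenced above, but the layout and the default value are assumptions, not the driver's actual definitions:

        #include <stdio.h>

        /* mirrors the fields dereferenced above; layout and values are assumed */
        struct dma_cfg_sketch {
                int pbl;         /* programmable burst length */
                int fixed_burst; /* use fixed-size bursts     */
                int burst_map;   /* per-channel burst mapping */
        };

        int main(void)
        {
                struct dma_cfg_sketch plat_cfg = { .pbl = 8, .fixed_burst = 1, .burst_map = 0 };
                struct dma_cfg_sketch *dma_cfg = &plat_cfg; /* may be NULL on some boards */
                int pbl = 16, fixed_burst = 0, burst_map = 0; /* 16 stands in for DEFAULT_DMA_PBL */

                if (dma_cfg) { /* same fallback as sxgbe_init_dma_engine() */
                        pbl = dma_cfg->pbl;
                        fixed_burst = dma_cfg->fixed_burst;
                        burst_map = dma_cfg->burst_map;
                }
                printf("pbl=%d fixed_burst=%d burst_map=%d\n", pbl, fixed_burst, burst_map);
                return 0;
        }
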
952/**
953 * sxgbe_init_mtl_engine: MTL init.
954 * @priv: driver private structure
955 * Description:
956 * It inits the MTL invoking the specific SXGBE callback.
957 */
958static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
959{
960 int queue_num;
961
962 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
963 priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
964 priv->hw_cap.tx_mtl_qsize);
965 priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
966 }
967}
968
969/**
970 * sxgbe_disable_mtl_engine: MTL disable.
971 * @priv: driver private structure
972 * Description:
973 * It disables the MTL queues by invoking the specific SXGBE callback.
974 */
975static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
976{
977 int queue_num;
978
979 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
980 priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
981}
982
983
984/**
985 * sxgbe_tx_timer: mitigation sw timer for tx.
986 * @data: data pointer
987 * Description:
988 * This is the timer handler to directly invoke the sxgbe_tx_clean.
989 */
990static void sxgbe_tx_timer(unsigned long data)
991{
992 struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data;
993 sxgbe_tx_queue_clean(p);
994}
995
996/**
997 * sxgbe_tx_init_coalesce: init tx mitigation options.
998 * @priv: driver private structure
999 * Description:
1000 * This inits the transmit coalesce parameters: i.e. timer rate,
1001 * timer handler and default threshold used for enabling the
1002 * interrupt on completion bit.
1003 */
1004static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
1005{
1006 u8 queue_num;
1007
1008 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
1009 struct sxgbe_tx_queue *p = priv->txq[queue_num];
1010 p->tx_coal_frames = SXGBE_TX_FRAMES;
1011 p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
1012 init_timer(&p->txtimer);
1013 p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
1014 p->txtimer.data = (unsigned long)priv->txq[queue_num];
1015 p->txtimer.function = sxgbe_tx_timer;
1016 add_timer(&p->txtimer);
1017 }
1018}
1019
1020static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
1021{
1022 u8 queue_num;
1023
1024 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
1025 struct sxgbe_tx_queue *p = priv->txq[queue_num];
1026 del_timer_sync(&p->txtimer);
1027 }
1028}
1029
1030/**
1031 * sxgbe_open - open entry point of the driver
1032 * @dev : pointer to the device structure.
1033 * Description:
1034 * This function is the open entry point of the driver.
1035 * Return value:
1036 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1037 * file on failure.
1038 */
1039static int sxgbe_open(struct net_device *dev)
1040{
1041 struct sxgbe_priv_data *priv = netdev_priv(dev);
1042 int ret, queue_num;
1043
1044 clk_prepare_enable(priv->sxgbe_clk);
1045
1046 sxgbe_check_ether_addr(priv);
1047
1048 /* Init the phy */
1049 ret = sxgbe_init_phy(dev);
1050 if (ret) {
1051 netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
1052 __func__, ret);
1053 goto phy_error;
1054 }
1055
1056 /* Create and initialize the TX/RX descriptors chains. */
1057 priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
1058 priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
1059 priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
1060 priv->tx_tc = TC_DEFAULT;
1061 priv->rx_tc = TC_DEFAULT;
1062 init_dma_desc_rings(dev);
1063
1064 /* DMA initialization and SW reset */
1065 ret = sxgbe_init_dma_engine(priv);
1066 if (ret < 0) {
1067 netdev_err(dev, "%s: DMA initialization failed\n", __func__);
1068 goto init_error;
1069 }
1070
1071 /* MTL initialization */
1072 sxgbe_init_mtl_engine(priv);
1073
1074 /* Copy the MAC addr into the HW */
1075 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
1076
1077 /* Initialize the MAC Core */
1078 priv->hw->mac->core_init(priv->ioaddr);
1079
1080 /* Request the IRQ lines */
1081 ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
1082 IRQF_SHARED, dev->name, dev);
1083 if (unlikely(ret < 0)) {
1084 netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
1085 __func__, priv->irq, ret);
1086 goto init_error;
1087 }
1088
1089 /* If the LPI irq is different from the mac irq
1090 * register a dedicated handler
1091 */
1092 if (priv->lpi_irq != priv->irq) {
1093 ret = devm_request_irq(priv->device, priv->lpi_irq,
1094 sxgbe_common_interrupt,
1095 IRQF_SHARED, dev->name, dev);
1096 if (unlikely(ret < 0)) {
1097 netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1098 __func__, priv->lpi_irq, ret);
1099 goto init_error;
1100 }
1101 }
1102
1103 /* Request TX DMA irq lines */
1104 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
1105 ret = devm_request_irq(priv->device,
1106 (priv->txq[queue_num])->irq_no,
1107 sxgbe_tx_interrupt, 0,
1108 dev->name, priv->txq[queue_num]);
1109 if (unlikely(ret < 0)) {
1110 netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
1111 __func__, (priv->txq[queue_num])->irq_no, ret);
1112 goto init_error;
1113 }
1114 }
1115
1116 /* Request RX DMA irq lines */
1117 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
1118 ret = devm_request_irq(priv->device,
1119 (priv->rxq[queue_num])->irq_no,
1120 sxgbe_rx_interrupt, 0,
1121 dev->name, priv->rxq[queue_num]);
1122 if (unlikely(ret < 0)) {
1123 netdev_err(dev, "%s: ERROR: allocating RX IRQ %d (error: %d)\n",
1124 __func__, (priv->rxq[queue_num])->irq_no, ret);
1125 goto init_error;
1126 }
1127 }
1128
1129 /* Enable the MAC Rx/Tx */
1130 priv->hw->mac->enable_tx(priv->ioaddr, true);
1131 priv->hw->mac->enable_rx(priv->ioaddr, true);
1132
1133 /* Set the HW DMA mode and the COE */
1134 sxgbe_mtl_operation_mode(priv);
1135
1136 /* Extra statistics */
1137 memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));
1138
1139 priv->xstats.tx_threshold = priv->tx_tc;
1140 priv->xstats.rx_threshold = priv->rx_tc;
1141
1142 /* Start the ball rolling... */
1143 netdev_dbg(dev, "DMA RX/TX processes started...\n");
1144 priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
1145 priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
1146
1147 if (priv->phydev)
1148 phy_start(priv->phydev);
1149
1150 /* initialise TX coalesce parameters */
1151 sxgbe_tx_init_coalesce(priv);
1152
1153 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1154 priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
1155 priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
1156 }
1157
1158 priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER;
1159 priv->eee_enabled = sxgbe_eee_init(priv);
1160
1161 napi_enable(&priv->napi);
1162 netif_start_queue(dev);
1163
1164 return 0;
1165
1166init_error:
1167 free_dma_desc_resources(priv);
1168 if (priv->phydev)
1169 phy_disconnect(priv->phydev);
1170phy_error:
1171 clk_disable_unprepare(priv->sxgbe_clk);
1172
1173 return ret;
1174}
1175
1176/**
1177 * sxgbe_release - close entry point of the driver
1178 * @dev : device pointer.
1179 * Description:
1180 * This is the stop entry point of the driver.
1181 */
1182static int sxgbe_release(struct net_device *dev)
1183{
1184 struct sxgbe_priv_data *priv = netdev_priv(dev);
1185
1186 if (priv->eee_enabled)
1187 del_timer_sync(&priv->eee_ctrl_timer);
1188
1189 /* Stop and disconnect the PHY */
1190 if (priv->phydev) {
1191 phy_stop(priv->phydev);
1192 phy_disconnect(priv->phydev);
1193 priv->phydev = NULL;
1194 }
1195
1196 netif_tx_stop_all_queues(dev);
1197
1198 napi_disable(&priv->napi);
1199
1200 /* delete TX timers */
1201 sxgbe_tx_del_timer(priv);
1202
1203 /* Stop TX/RX DMA and clear the descriptors */
1204 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
1205 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
1206
1207 /* disable MTL queue */
1208 sxgbe_disable_mtl_engine(priv);
1209
1210 /* Release and free the Rx/Tx resources */
1211 free_dma_desc_resources(priv);
1212
1213 /* Disable the MAC Rx/Tx */
1214 priv->hw->mac->enable_tx(priv->ioaddr, false);
1215 priv->hw->mac->enable_rx(priv->ioaddr, false);
1216
1217 clk_disable_unprepare(priv->sxgbe_clk);
1218
1219 return 0;
1220}
1221
1222/* Prepare first Tx descriptor for doing TSO operation */
1223void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
1224 struct sxgbe_tx_norm_desc *first_desc,
1225 struct sk_buff *skb)
1226{
1227 unsigned int total_hdr_len, tcp_hdr_len;
1228
1229 /* Write first Tx descriptor with appropriate value */
1230 tcp_hdr_len = tcp_hdrlen(skb);
1231 total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;
1232
1233 first_desc->tdes01 = dma_map_single(priv->device, skb->data,
1234 total_hdr_len, DMA_TO_DEVICE);
1235 if (dma_mapping_error(priv->device, first_desc->tdes01))
1236 pr_err("%s: TX dma mapping failed!!\n", __func__);
1237
1238 first_desc->tdes23.tx_rd_des23.first_desc = 1;
1239 priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len,
1240 tcp_hdr_len,
1241 skb->len - total_hdr_len);
1242}
1243
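sxgbe_tso_prepare() maps only the protocol headers through the first descriptor and passes the TCP header length and the remaining payload length to the hardware, which then cuts the payload into MSS-sized segments and replicates the headers. A worked example of the arithmetic, assuming a 14-byte Ethernet, 20-byte IPv4 and 20-byte TCP header (frame size and MSS are arbitrary sample values):

        #include <stdio.h>

        int main(void)
        {
                unsigned int skb_len = 64014;  /* total frame bytes         */
                unsigned int tcp_hdr_len = 20; /* tcp_hdrlen(skb)           */
                unsigned int total_hdr_len = 14 + 20 + tcp_hdr_len; /* eth + ip + tcp */
                unsigned int mss = 1448;       /* skb_shinfo(skb)->gso_size */
                unsigned int payload = skb_len - total_hdr_len;
                unsigned int segs = (payload + mss - 1) / mss;

                printf("hdr=%u payload=%u -> %u segments of at most %u bytes\n",
                       total_hdr_len, payload, segs, mss);
                return 0;
        }
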
1244/**
1245 * sxgbe_xmit: Tx entry point of the driver
1246 * @skb : the socket buffer
1247 * @dev : device pointer
1248 * Description : this is the tx entry point of the driver.
1249 * It programs the chain or the ring and supports oversized frames
1250 * and SG feature.
1251 */
1252static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
1253{
1254 unsigned int entry, frag_num;
1255 int cksum_flag = 0;
1256 struct netdev_queue *dev_txq;
1257 unsigned txq_index = skb_get_queue_mapping(skb);
1258 struct sxgbe_priv_data *priv = netdev_priv(dev);
1259 unsigned int tx_rsize = priv->dma_tx_size;
1260 struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
1261 struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
1262 struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL;
1263 int nr_frags = skb_shinfo(skb)->nr_frags;
1264 int no_pagedlen = skb_headlen(skb);
1265 int is_jumbo = 0;
1266 u16 cur_mss = skb_shinfo(skb)->gso_size;
1267 u32 ctxt_desc_req = 0;
1268
1269 /* get the TX queue handle */
1270 dev_txq = netdev_get_tx_queue(dev, txq_index);
1271
1272 if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
1273 ctxt_desc_req = 1;
1274
1275 if (unlikely(vlan_tx_tag_present(skb) ||
1276 ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1277 tqueue->hwts_tx_en)))
1278 ctxt_desc_req = 1;
1279
1280 /* get the spinlock */
1281 spin_lock(&tqueue->tx_lock);
1282
1283 if (priv->tx_path_in_lpi_mode)
1284 sxgbe_disable_eee_mode(priv);
1285
1286 if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
1287 if (!netif_tx_queue_stopped(dev_txq)) {
1288 netif_tx_stop_queue(dev_txq);
1289 netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
1290 __func__, txq_index);
1291 }
1292 /* release the spin lock in case of BUSY */
1293 spin_unlock(&tqueue->tx_lock);
1294 return NETDEV_TX_BUSY;
1295 }
1296
1297 entry = tqueue->cur_tx % tx_rsize;
1298 tx_desc = tqueue->dma_tx + entry;
1299
1300 first_desc = tx_desc;
1301 if (ctxt_desc_req)
1302 ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc;
1303
1304 /* save the skb address */
1305 tqueue->tx_skbuff[entry] = skb;
1306
1307 if (!is_jumbo) {
1308 if (likely(skb_is_gso(skb))) {
1309 /* TSO support */
1310 if (unlikely(tqueue->prev_mss != cur_mss)) {
1311 priv->hw->desc->tx_ctxt_desc_set_mss(
1312 ctxt_desc, cur_mss);
1313 priv->hw->desc->tx_ctxt_desc_set_tcmssv(
1314 ctxt_desc);
1315 priv->hw->desc->tx_ctxt_desc_reset_ostc(
1316 ctxt_desc);
1317 priv->hw->desc->tx_ctxt_desc_set_ctxt(
1318 ctxt_desc);
1319 priv->hw->desc->tx_ctxt_desc_set_owner(
1320 ctxt_desc);
1321
1322 entry = (++tqueue->cur_tx) % tx_rsize;
1323 first_desc = tqueue->dma_tx + entry;
1324
1325 tqueue->prev_mss = cur_mss;
1326 }
1327 sxgbe_tso_prepare(priv, first_desc, skb);
1328 } else {
1329 tx_desc->tdes01 = dma_map_single(priv->device,
1330 skb->data, no_pagedlen, DMA_TO_DEVICE);
1331 if (dma_mapping_error(priv->device, tx_desc->tdes01))
1332 netdev_err(dev, "%s: TX dma mapping failed!!\n",
1333 __func__);
1334
1335 priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
1336 no_pagedlen, cksum_flag);
1337 }
1338 }
1339
1340 for (frag_num = 0; frag_num < nr_frags; frag_num++) {
1341 const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
1342 int len = skb_frag_size(frag);
1343
1344 entry = (++tqueue->cur_tx) % tx_rsize;
1345 tx_desc = tqueue->dma_tx + entry;
1346 tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
1347 DMA_TO_DEVICE);
1348
1349 tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
1350 tqueue->tx_skbuff[entry] = NULL;
1351
1352 /* prepare the descriptor */
1353 priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
1354 len, cksum_flag);
1355 /* memory barrier to flush descriptor */
1356 wmb();
1357
1358 /* set the owner */
1359 priv->hw->desc->set_tx_owner(tx_desc);
1360 }
1361
1362 /* close the descriptors */
1363 priv->hw->desc->close_tx_desc(tx_desc);
1364
1365 /* memory barrier to flush descriptor */
1366 wmb();
1367
1368 tqueue->tx_count_frames += nr_frags + 1;
1369 if (tqueue->tx_count_frames > tqueue->tx_coal_frames) {
1370 priv->hw->desc->clear_tx_ic(tx_desc);
1371 priv->xstats.tx_reset_ic_bit++;
1372 mod_timer(&tqueue->txtimer,
1373 SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
1374 } else {
1375 tqueue->tx_count_frames = 0;
1376 }
1377
1378 /* set owner for first desc */
1379 priv->hw->desc->set_tx_owner(first_desc);
1380
1381 /* memory barrier to flush descriptor */
1382 wmb();
1383
1384 tqueue->cur_tx++;
1385
1386 /* display current ring */
1387 netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
1388 __func__, tqueue->cur_tx % tx_rsize,
1389 tqueue->dirty_tx % tx_rsize, entry,
1390 first_desc, nr_frags);
1391
1392 if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
1393 netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
1394 __func__);
1395 netif_tx_stop_queue(dev_txq);
1396 }
1397
1398 dev->stats.tx_bytes += skb->len;
1399
1400 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1401 tqueue->hwts_tx_en)) {
1402 /* declare that device is doing timestamping */
1403 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1404 priv->hw->desc->tx_enable_tstamp(first_desc);
1405 }
1406
1407 if (!tqueue->hwts_tx_en)
1408 skb_tx_timestamp(skb);
1409
1410 priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
1411
1412 spin_unlock(&tqueue->tx_lock);
1413
1414 return NETDEV_TX_OK;
1415}
1416
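cur_tx and dirty_tx are free-running counters; only the `% tx_rsize` operation turns them into ring indices, so their difference is the number of descriptors in flight. sxgbe_tx_avail() is defined earlier in the file and not shown here; the sketch below models the usual producer/consumer computation, which is an assumption about its exact form:

        #include <stdio.h>

        /* assumed form of the check: ring slots not currently in flight */
        static unsigned int tx_avail(unsigned int cur, unsigned int dirty,
                                     unsigned int size)
        {
                return size - (cur - dirty); /* unsigned math survives wrap-around */
        }

        int main(void)
        {
                unsigned int size = 512, cur = 1030, dirty = 1000;

                printf("in flight %u, available %u, next slot %u\n",
                       cur - dirty, tx_avail(cur, dirty, size), cur % size);
                return 0;
        }
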
1417/**
1418 * sxgbe_rx_refill: refill used skb preallocated buffers
1419 * @priv: driver private structure
1420 * Description : this reallocates the skbs for the zero-copy
1421 * based reception process.
1422 */
1423static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
1424{
1425 unsigned int rxsize = priv->dma_rx_size;
1426 int bfsize = priv->dma_buf_sz;
1427 u8 qnum = priv->cur_rx_qnum;
1428
1429 for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
1430 priv->rxq[qnum]->dirty_rx++) {
1431 unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
1432 struct sxgbe_rx_norm_desc *p;
1433
1434 p = priv->rxq[qnum]->dma_rx + entry;
1435
1436 if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
1437 struct sk_buff *skb;
1438
1439 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
1440
1441 if (unlikely(skb == NULL))
1442 break;
1443
1444 priv->rxq[qnum]->rx_skbuff[entry] = skb;
1445 priv->rxq[qnum]->rx_skbuff_dma[entry] =
1446 dma_map_single(priv->device, skb->data, bfsize,
1447 DMA_FROM_DEVICE);
1448
1449 p->rdes23.rx_rd_des23.buf2_addr =
1450 priv->rxq[qnum]->rx_skbuff_dma[entry];
1451 }
1452
1453 /* Added memory barrier for RX descriptor modification */
1454 wmb();
1455 priv->hw->desc->set_rx_owner(p);
1456 /* Added memory barrier for RX descriptor modification */
1457 wmb();
1458 }
1459}
1460
1461/**
1462 * sxgbe_rx: receive the frames from the remote host
1463 * @priv: driver private structure
1464 * @limit: napi budget.
1465 * Description : this is the function called by the napi poll method.
1466 * It gets all the frames inside the ring.
1467 */
1468static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
1469{
1470 u8 qnum = priv->cur_rx_qnum;
1471 unsigned int rxsize = priv->dma_rx_size;
1472 unsigned int entry = priv->rxq[qnum]->cur_rx;
1473 unsigned int next_entry = 0;
1474 unsigned int count = 0;
1475 int checksum;
1476 int status;
1477
1478 while (count < limit) {
1479 struct sxgbe_rx_norm_desc *p;
1480 struct sk_buff *skb;
1481 int frame_len;
1482
1483 p = priv->rxq[qnum]->dma_rx + entry;
1484
1485 if (priv->hw->desc->get_rx_owner(p))
1486 break;
1487
1488 count++;
1489
1490 next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
1491 prefetch(priv->rxq[qnum]->dma_rx + next_entry);
1492
1493 /* Read the status of the incoming frame and also get checksum
1494 * value based on whether it is enabled in SXGBE hardware or
1495 * not.
1496 */
1497 status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
1498 &checksum);
1499 if (unlikely(status < 0)) {
1500 entry = next_entry;
1501 continue;
1502 }
1503 if (unlikely(!priv->rxcsum_insertion))
1504 checksum = CHECKSUM_NONE;
1505
1506 skb = priv->rxq[qnum]->rx_skbuff[entry];
1507
1508 if (unlikely(!skb)) {
1509 netdev_err(priv->dev, "rx descriptor is not consistent\n");
1510 break;
1511 }
1512 prefetch(skb->data - NET_IP_ALIGN);
1513 priv->rxq[qnum]->rx_skbuff[entry] = NULL;
1514 frame_len = priv->hw->desc->get_rx_frame_len(p);
1515
1516 skb_put(skb, frame_len);
1517
1518 skb->ip_summed = checksum;
1519 if (checksum == CHECKSUM_NONE)
1520 netif_receive_skb(skb);
1521 else
1522 napi_gro_receive(&priv->napi, skb);
1523
1524 entry = next_entry;
1525 }
1526
1527 sxgbe_rx_refill(priv);
1528
1529 return count;
1530}
1531
1532/**
1533 * sxgbe_poll - sxgbe poll method (NAPI)
1534 * @napi : pointer to the napi structure.
1535 * @budget : maximum number of packets that the current CPU can receive from
1536 * all interfaces.
1537 * Description :
1538 * To look at the incoming frames and clear the tx resources.
1539 */
1540static int sxgbe_poll(struct napi_struct *napi, int budget)
1541{
1542 struct sxgbe_priv_data *priv = container_of(napi,
1543 struct sxgbe_priv_data, napi);
1544 int work_done = 0;
1545 u8 qnum = priv->cur_rx_qnum;
1546
1547 priv->xstats.napi_poll++;
1548 /* first, clean the tx queues */
1549 sxgbe_tx_all_clean(priv);
1550
1551 work_done = sxgbe_rx(priv, budget);
1552 if (work_done < budget) {
1553 napi_complete(napi);
1554 priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
1555 }
1556
1557 return work_done;
1558}
1559
1560/**
1561 * sxgbe_tx_timeout
1562 * @dev : Pointer to net device structure
1563 * Description: this function is called when a packet transmission fails to
1564 * complete within a reasonable time. The driver will mark the error in the
1565 * netdev structure and arrange for the device to be reset to a sane state
1566 * in order to transmit a new packet.
1567 */
1568static void sxgbe_tx_timeout(struct net_device *dev)
1569{
1570 struct sxgbe_priv_data *priv = netdev_priv(dev);
1571
1572 sxgbe_reset_all_tx_queues(priv);
1573}
1574
1575/**
1576 * sxgbe_common_interrupt - main ISR
1577 * @irq: interrupt number.
1578 * @dev_id: to pass the net device pointer.
1579 * Description: this is the main driver interrupt service routine.
1580 * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
1581 * interrupts.
1582 */
1583static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
1584{
1585 struct net_device *netdev = (struct net_device *)dev_id;
1586 struct sxgbe_priv_data *priv = netdev_priv(netdev);
1587 int status;
1588
1589 status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats);
1590 /* For LPI we need to save the tx status */
1591 if (status & TX_ENTRY_LPI_MODE) {
1592 priv->xstats.tx_lpi_entry_n++;
1593 priv->tx_path_in_lpi_mode = true;
1594 }
1595 if (status & TX_EXIT_LPI_MODE) {
1596 priv->xstats.tx_lpi_exit_n++;
1597 priv->tx_path_in_lpi_mode = false;
1598 }
1599 if (status & RX_ENTRY_LPI_MODE)
1600 priv->xstats.rx_lpi_entry_n++;
1601 if (status & RX_EXIT_LPI_MODE)
1602 priv->xstats.rx_lpi_exit_n++;
1603
1604 return IRQ_HANDLED;
1605}
1606
1607/**
1608 * sxgbe_tx_interrupt - TX DMA ISR
1609 * @irq: interrupt number.
1610 * @dev_id: to pass the net device pointer.
1611 * Description: this is the tx dma interrupt service routine.
1612 */
1613static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
1614{
1615 int status;
1616 struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
1617 struct sxgbe_priv_data *priv = txq->priv_ptr;
1618
1619 /* get the channel status */
1620 status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
1621 &priv->xstats);
1622 /* check for normal path */
1623 if (likely((status & handle_tx)))
1624 napi_schedule(&priv->napi);
1625
1626 /* check for unrecoverable error */
1627 if (unlikely((status & tx_hard_error)))
1628 sxgbe_restart_tx_queue(priv, txq->queue_no);
1629
1630 /* check for TC configuration change */
1631 if (unlikely((status & tx_bump_tc) &&
1632 (priv->tx_tc != SXGBE_MTL_SFMODE) &&
1633 (priv->tx_tc < 512))) {
1634 /* the TX TC step is 32 up to 128, then 64 */
1635 priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
1636 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
1637 txq->queue_no, priv->tx_tc);
1638 priv->xstats.tx_threshold = priv->tx_tc;
1639 }
1640
1641 return IRQ_HANDLED;
1642}
1643
1644/**
1645 * sxgbe_rx_interrupt - RX DMA ISR
1646 * @irq: interrupt number.
1647 * @dev_id: to pass the net device pointer.
1648 * Description: this is the rx dma interrupt service routine.
1649 */
1650static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
1651{
1652 int status;
1653 struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
1654 struct sxgbe_priv_data *priv = rxq->priv_ptr;
1655
1656 /* get the channel status */
1657 status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
1658 &priv->xstats);
1659
1660 if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
1661 priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
1662 __napi_schedule(&priv->napi);
1663 }
1664
1665 /* check for TC configuration change */
1666 if (unlikely((status & rx_bump_tc) &&
1667 (priv->rx_tc != SXGBE_MTL_SFMODE) &&
1668 (priv->rx_tc < 128))) {
1669 /* step of TC is 32 */
1670 priv->rx_tc += 32;
1671 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
1672 rxq->queue_no, priv->rx_tc);
1673 priv->xstats.rx_threshold = priv->rx_tc;
1674 }
1675
1676 return IRQ_HANDLED;
1677}
1678
1679static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
1680{
1681 u64 val = readl(ioaddr + reg_lo);
1682
1683 val |= ((u64)readl(ioaddr + reg_hi)) << 32;
1684
1685 return val;
1686}
1687
1688
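Each MMC counter is a 64-bit value split across a LO/HI register pair, which is why the counters are frozen (SXGBE_MMC_CTRL_CNT_FRZ) while being read below: the hardware must not update the pair between the two readl() calls. A trivial user-space model of the combine done by the helper above:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t lo = 0x89abcdef, hi = 0x01234567; /* stand-ins for the two readl()s */
                uint64_t val = lo | ((uint64_t)hi << 32);

                printf("counter = 0x%016llx\n", (unsigned long long)val);
                return 0;
        }
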
1689/* sxgbe_get_stats64 - entry point to see statistical information of device
1690 * @dev : device pointer.
1691 * @stats : pointer to hold all the statistical information of device.
1692 * Description:
1693 * This function is a driver entry point called whenever the ifconfig
1694 * command is executed to see device statistics. Statistics are the
1695 * number of bytes sent or received, errors occurred, etc.
1696 * Return value:
1697 * This function returns various statistical information of device.
1698 */
1699static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
1700 struct rtnl_link_stats64 *stats)
1701{
1702 struct sxgbe_priv_data *priv = netdev_priv(dev);
1703 void __iomem *ioaddr = priv->ioaddr;
1704 u64 count;
1705
1706 spin_lock(&priv->stats_lock);
1707 /* Freeze the counter registers before reading value otherwise it may
1708 * get updated by hardware while we are reading them
1709 */
1710 writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);
1711
1712 stats->rx_bytes = sxgbe_get_stat64(ioaddr,
1713 SXGBE_MMC_RXOCTETLO_GCNT_REG,
1714 SXGBE_MMC_RXOCTETHI_GCNT_REG);
1715
1716 stats->rx_packets = sxgbe_get_stat64(ioaddr,
1717 SXGBE_MMC_RXFRAMELO_GBCNT_REG,
1718 SXGBE_MMC_RXFRAMEHI_GBCNT_REG);
1719
1720 stats->multicast = sxgbe_get_stat64(ioaddr,
1721 SXGBE_MMC_RXMULTILO_GCNT_REG,
1722 SXGBE_MMC_RXMULTIHI_GCNT_REG);
1723
1724 stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
1725 SXGBE_MMC_RXCRCERRLO_REG,
1726 SXGBE_MMC_RXCRCERRHI_REG);
1727
1728 stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
1729 SXGBE_MMC_RXLENERRLO_REG,
1730 SXGBE_MMC_RXLENERRHI_REG);
1731
1732 stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
1733 SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
1734 SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);
1735
1736 stats->tx_bytes = sxgbe_get_stat64(ioaddr,
1737 SXGBE_MMC_TXOCTETLO_GCNT_REG,
1738 SXGBE_MMC_TXOCTETHI_GCNT_REG);
1739
1740 count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
1741 SXGBE_MMC_TXFRAMEHI_GBCNT_REG);
1742 /* count holds both good and bad frames; errors = total - good */
1743 stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
1744 SXGBE_MMC_TXFRAMEHI_GCNT_REG);
1745 stats->tx_errors = count - stats->tx_errors;
1746 stats->tx_packets = count;
1747 stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
1748 SXGBE_MMC_TXUFLWHI_GBCNT_REG);
1749 writel(0, ioaddr + SXGBE_MMC_CTL_REG);
1750 spin_unlock(&priv->stats_lock);
1751
1752 return stats;
1753}
1754
1755/* sxgbe_set_features - entry point to set offload features of the device.
1756 * @dev : device pointer.
1757 * @features : features which are required to be set.
1758 * Description:
1759 * This function is a driver entry point and called by Linux kernel whenever
1760 * any device features are set or reset by user.
1761 * Return value:
1762 * This function returns 0 after setting or resetting device features.
1763 */
1764static int sxgbe_set_features(struct net_device *dev,
1765 netdev_features_t features)
1766{
1767 struct sxgbe_priv_data *priv = netdev_priv(dev);
1768 netdev_features_t changed = dev->features ^ features;
1769
1770 if (changed & NETIF_F_RXCSUM) {
1771 if (features & NETIF_F_RXCSUM) {
1772 priv->hw->mac->enable_rx_csum(priv->ioaddr);
1773 priv->rxcsum_insertion = true;
1774 } else {
1775 priv->hw->mac->disable_rx_csum(priv->ioaddr);
1776 priv->rxcsum_insertion = false;
1777 }
1778 }
1779
1780 return 0;
1781}
1782
1783/* sxgbe_change_mtu - entry point to change MTU size for the device.
1784 * @dev : device pointer.
1785 * @new_mtu : the new MTU size for the device.
1786 * Description: the Maximum Transmission Unit (MTU) is used by the network layer
1787 * to drive packet transmission. Ethernet has an MTU of 1500 octets
1788 * (ETH_DATA_LEN). This value can be changed with ifconfig.
1789 * Return value:
1790 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1791 * file on failure.
1792 */
1793static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
1794{
1795 /* RFC 791, page 25, "Every internet module must be able to forward
1796 * a datagram of 68 octets without further fragmentation."
1797 */
1798 if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
1799 netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n",
1800 MIN_MTU, MAX_MTU);
1801 return -EINVAL;
1802 }
1803
1804 /* Return if the buffer sizes will not change */
1805 if (dev->mtu == new_mtu)
1806 return 0;
1807
1808 dev->mtu = new_mtu;
1809
1810 if (!netif_running(dev))
1811 return 0;
1812
1813 /* Receive ring buffer size needs to be set based on the MTU. If the
1814 * MTU changes, the receive ring buffers must be reinitialised.
1815 * Hence bring the interface down and then back up.
1816 */
1817 sxgbe_release(dev);
1818 return sxgbe_open(dev);
1819}
1820
1821static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
1822 unsigned int reg_n)
1823{
1824 unsigned long data;
1825
1826 data = (addr[5] << 8) | addr[4];
1827 /* For MAC Addr registers we have to set the Address Enable (AE)
1828 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
1829 * is RO.
1830 */
1831 writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
1832 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
1833 writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
1834}
1835
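A worked example of the packing done by sxgbe_set_umac_addr(): for the address 00:11:22:33:44:55 (addr[0] = 0x00 ... addr[5] = 0x55) the high register receives 0x5544 (ORed with the AE bit) and the low register 0x33221100. A standalone check of that arithmetic:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
                unsigned int hi = (addr[5] << 8) | addr[4];
                unsigned int lo = (addr[3] << 24) | (addr[2] << 16) |
                                  (addr[1] << 8) | addr[0];

                printf("ADDR_HIGH = 0x%04x (plus AE bit), ADDR_LOW = 0x%08x\n",
                       hi, lo);
                return 0;
        }
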
1836/**
1837 * sxgbe_set_rx_mode - entry point for setting the receive mode of
1838 * a device (unicast, multicast, promiscuous addressing)
1839 * @dev : pointer to the device structure
1840 * Description:
1841 * This function is a driver entry point which gets called by the kernel
1842 * whenever different receive mode like unicast, multicast and promiscuous
1843 * must be enabled/disabled.
1844 * Return value:
1845 * void.
1846 */
1847static void sxgbe_set_rx_mode(struct net_device *dev)
1848{
1849 struct sxgbe_priv_data *priv = netdev_priv(dev);
1850 void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
1851 unsigned int value = 0;
1852 u32 mc_filter[2];
1853 struct netdev_hw_addr *ha;
1854 int reg = 1;
1855
1856 netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
1857 __func__, netdev_mc_count(dev), netdev_uc_count(dev));
1858
1859 if (dev->flags & IFF_PROMISC) {
1860 value = SXGBE_FRAME_FILTER_PR;
1861
1862 } else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
1863 (dev->flags & IFF_ALLMULTI)) {
1864 value = SXGBE_FRAME_FILTER_PM; /* pass all multi */
1865 writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
1866 writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);
1867
1868 } else if (!netdev_mc_empty(dev)) {
1869 /* Hash filter for multicast */
1870 value = SXGBE_FRAME_FILTER_HMC;
1871
1872 memset(mc_filter, 0, sizeof(mc_filter));
1873 netdev_for_each_mc_addr(ha, dev) {
1874 /* The upper 6 bits of the calculated CRC are used to
1875 * index the contents of the hash table
1876 */
1877 int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
1878
1879 /* The most significant bit determines the register to
1880 * use (H/L) while the other 5 bits determine the bit
1881 * within the register.
1882 */
1883 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1884 }
1885 writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
1886 writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
1887 }
1888
1889 /* Handle multiple unicast addresses (perfect filtering) */
1890 if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES)
1891 /* Switch to promiscuous mode if more than 16 addrs
1892 * are required
1893 */
1894 value |= SXGBE_FRAME_FILTER_PR;
1895 else {
1896 netdev_for_each_uc_addr(ha, dev) {
1897 sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
1898 reg++;
1899 }
1900 }
1901#ifdef FRAME_FILTER_DEBUG
1902 /* Enable Receive all mode (to debug filtering_fail errors) */
1903 value |= SXGBE_FRAME_FILTER_RA;
1904#endif
1905 writel(value, ioaddr + SXGBE_FRAME_FILTER);
1906
1907 netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
1908 readl(ioaddr + SXGBE_FRAME_FILTER),
1909 readl(ioaddr + SXGBE_HASH_HIGH),
1910 readl(ioaddr + SXGBE_HASH_LOW));
1911}
1912
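The hash-filter indexing above can be reproduced in user space. The helpers below re-implement the kernel's crc32_le() (reflected CRC-32, polynomial 0xEDB88320) and bitrev32() purely for illustration; the sample address is the MAC mapping of the IPv4 all-hosts multicast group 224.0.0.1:

        #include <stdint.h>
        #include <stdio.h>

        /* user-space stand-in for the kernel's crc32_le() */
        static uint32_t crc32_le(uint32_t crc, const uint8_t *p, int len)
        {
                int i;

                while (len--) {
                        crc ^= *p++;
                        for (i = 0; i < 8; i++)
                                crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
                }
                return crc;
        }

        /* user-space stand-in for the kernel's bitrev32() */
        static uint32_t bitrev32(uint32_t x)
        {
                uint32_t r = 0;
                int i;

                for (i = 0; i < 32; i++)
                        r = (r << 1) | ((x >> i) & 1);
                return r;
        }

        int main(void)
        {
                /* MAC address of the IPv4 all-hosts group 224.0.0.1 */
                uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
                uint32_t mc_filter[2] = { 0, 0 };
                int bit_nr = bitrev32(~crc32_le(~0u, addr, 6)) >> 26;

                /* bit 5 selects the register, bits 4:0 the bit within it */
                mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
                printf("bit_nr=%d -> HASH_%s, mask 0x%08x\n", bit_nr,
                       (bit_nr >> 5) ? "HIGH" : "LOW",
                       (unsigned)mc_filter[bit_nr >> 5]);
                return 0;
        }
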
1913/**
1914 * sxgbe_config - entry point for changing configuration mode passed on by
1915 * ifconfig
1916 * @dev : pointer to the device structure
1917 * @map : pointer to the device mapping structure
1918 * Description:
1919 * This function is a driver entry point which gets called by the kernel
1920 * whenever some device configuration is changed.
1921 * Return value:
1922 * This function returns 0 if success and appropriate error otherwise.
1923 */
1924static int sxgbe_config(struct net_device *dev, struct ifmap *map)
1925{
1926 struct sxgbe_priv_data *priv = netdev_priv(dev);
1927
1928 /* Can't act on a running interface */
1929 if (dev->flags & IFF_UP)
1930 return -EBUSY;
1931
1932 /* Don't allow changing the I/O address */
1933 if (map->base_addr != (unsigned long)priv->ioaddr) {
1934 netdev_warn(dev, "can't change I/O address\n");
1935 return -EOPNOTSUPP;
1936 }
1937
1938 /* Don't allow changing the IRQ */
1939 if (map->irq != priv->irq) {
1940 netdev_warn(dev, "can't change IRQ number %d\n", priv->irq);
1941 return -EOPNOTSUPP;
1942 }
1943
1944 return 0;
1945}
1946
1947#ifdef CONFIG_NET_POLL_CONTROLLER
1948/**
1949 * sxgbe_poll_controller - entry point for polling receive by device
1950 * @dev : pointer to the device structure
1951 * Description:
1952 * This function is used by NETCONSOLE and other diagnostic tools
1953 * to allow network I/O with interrupts disabled.
1954 * Return value:
1955 * Void.
1956 */
1957static void sxgbe_poll_controller(struct net_device *dev)
1958{
1959 struct sxgbe_priv_data *priv = netdev_priv(dev);
1960
1961 disable_irq(priv->irq);
1962 sxgbe_rx_interrupt(priv->irq, priv->rxq[priv->cur_rx_qnum]);
1963 enable_irq(priv->irq);
1964}
1965#endif
1966
1967/* sxgbe_ioctl - Entry point for the Ioctl
1968 * @dev: Device pointer.
1969 * @rq: An IOCTL specific structure, that can contain a pointer to
1970 * a proprietary structure used to pass information to the driver.
1971 * @cmd: IOCTL command
1972 * Description:
1973 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
1974 */
1975static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1976{
1977 struct sxgbe_priv_data *priv = netdev_priv(dev);
1978 int ret = -EOPNOTSUPP;
1979
1980 if (!netif_running(dev))
1981 return -EINVAL;
1982
1983 switch (cmd) {
1984 case SIOCGMIIPHY:
1985 case SIOCGMIIREG:
1986 case SIOCSMIIREG:
1987 if (!priv->phydev)
1988 return -EINVAL;
1989 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
1990 break;
1991 default:
1992 break;
1993 }
1994
1995 return ret;
1996}
1997
1998static const struct net_device_ops sxgbe_netdev_ops = {
1999 .ndo_open = sxgbe_open,
2000 .ndo_start_xmit = sxgbe_xmit,
2001 .ndo_stop = sxgbe_release,
2002 .ndo_get_stats64 = sxgbe_get_stats64,
2003 .ndo_change_mtu = sxgbe_change_mtu,
2004 .ndo_set_features = sxgbe_set_features,
2005 .ndo_set_rx_mode = sxgbe_set_rx_mode,
2006 .ndo_tx_timeout = sxgbe_tx_timeout,
2007 .ndo_do_ioctl = sxgbe_ioctl,
2008 .ndo_set_config = sxgbe_config,
2009#ifdef CONFIG_NET_POLL_CONTROLLER
2010 .ndo_poll_controller = sxgbe_poll_controller,
2011#endif
2012 .ndo_set_mac_address = eth_mac_addr,
2013};
2014
2015/* Get the hardware ops */
2016void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
2017{
2018 ops_ptr->mac = sxgbe_get_core_ops();
2019 ops_ptr->desc = sxgbe_get_desc_ops();
2020 ops_ptr->dma = sxgbe_get_dma_ops();
2021 ops_ptr->mtl = sxgbe_get_mtl_ops();
2022
2023 /* set the MDIO communication Address/Data registers */
2024 ops_ptr->mii.addr = SXGBE_MDIO_SCMD_ADD_REG;
2025 ops_ptr->mii.data = SXGBE_MDIO_SCMD_DATA_REG;
2026
2027 /* Assign the default link settings:
2028 * SXGBE defines no default values to be set in the registers,
2029 * so port and duplex are set to 0
2030 */
2031 ops_ptr->link.port = 0;
2032 ops_ptr->link.duplex = 0;
2033 ops_ptr->link.speed = SXGBE_SPEED_10G;
2034}
2035
2036/**
2037 * sxgbe_hw_init - Init the GMAC device
2038 * @priv: driver private structure
2039 * Description: this function checks the HW capability
2040 * (if supported) and sets the driver's features.
2041 */
2042static void sxgbe_hw_init(struct sxgbe_priv_data * const priv)
2043{
2044 u32 ctrl_ids;
2045
2046 priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
2047
2048 /* get the hardware ops */
2049 sxgbe_get_ops(priv->hw);
2050
2051 /* get the controller id */
2052 ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
2053 priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
2054 priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
2055 pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
2056 priv->hw->ctrl_uid, priv->hw->ctrl_id);
2057
2058 /* get the H/W features */
2059 if (!sxgbe_get_hw_features(priv))
2060 pr_info("Hardware features not found\n");
2061
2062 if (priv->hw_cap.tx_csum_offload)
2063 pr_info("TX Checksum offload supported\n");
2064
2065 if (priv->hw_cap.rx_csum_offload)
2066 pr_info("RX Checksum offload supported\n");
2067}
2068
2069/**
2070 * sxgbe_drv_probe
2071 * @device: device pointer
2072 * @plat_dat: platform data pointer
2073 * @addr: iobase memory address
2074 * Description: this is the main probe function, used to
2075 * allocate the net device and initialise the private structure.
2076 */
2077struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
2078 struct sxgbe_plat_data *plat_dat,
2079 void __iomem *addr)
2080{
2081 struct sxgbe_priv_data *priv;
2082 struct net_device *ndev;
2083 int ret;
2084 u8 queue_num;
2085
2086 ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
2087 SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
2088 if (!ndev)
2089 return NULL;
2090
2091 SET_NETDEV_DEV(ndev, device);
2092
2093 priv = netdev_priv(ndev);
2094 priv->device = device;
2095 priv->dev = ndev;
2096
2097 sxgbe_set_ethtool_ops(ndev);
2098 priv->plat = plat_dat;
2099 priv->ioaddr = addr;
2100
2101 /* Verify driver arguments */
2102 sxgbe_verify_args();
2103
2104 /* Init MAC and get the capabilities */
2105 sxgbe_hw_init(priv);
2106
2107 /* allocate memory resources for Descriptor rings */
2108 ret = txring_mem_alloc(priv);
2109 if (ret)
2110 goto error_free_netdev;
2111
2112 ret = rxring_mem_alloc(priv);
2113 if (ret)
2114 goto error_free_netdev;
2115
2116 ndev->netdev_ops = &sxgbe_netdev_ops;
2117
2118 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2119 NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
2120 NETIF_F_GRO;
2121 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
2122 ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);
2123
2124 /* assign filtering support */
2125 ndev->priv_flags |= IFF_UNICAST_FLT;
2126
2127 priv->msg_enable = netif_msg_init(debug, default_msg_level);
2128
2129 /* Enable TCP segmentation offload for all DMA channels */
2130 if (priv->hw_cap.tcpseg_offload) {
2131 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
2132 priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
2133 }
2134 }
2135
2136 /* Enable Rx checksum offload */
2137 if (priv->hw_cap.rx_csum_offload) {
2138 priv->hw->mac->enable_rx_csum(priv->ioaddr);
2139 priv->rxcsum_insertion = true;
2140 }
2141
2142 /* Initialise pause frame settings */
2143 priv->rx_pause = 1;
2144 priv->tx_pause = 1;
2145
2146 /* Rx Watchdog is available; enable it depending on platform data */
2147 if (!priv->plat->riwt_off) {
2148 priv->use_riwt = 1;
2149 pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
2150 }
2151
2152 netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);
2153
2154 spin_lock_init(&priv->stats_lock);
2155
2156 priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
2157 if (IS_ERR(priv->sxgbe_clk)) {
2158 netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
2159 __func__);
2160 goto error_clk_get;
2161 }
2162
2163 /* If a specific clk_csr value is passed from the platform,
2164 * this means that the CSR Clock Range selection cannot be
2165 * changed at run-time and is fixed. Otherwise the driver will
2166 * try to set the MDC clock dynamically according to the
2167 * actual CSR input clock.
2168 */
2169 if (!priv->plat->clk_csr)
2170 sxgbe_clk_csr_set(priv);
2171 else
2172 priv->clk_csr = priv->plat->clk_csr;
2173
2174 /* MDIO bus Registration */
2175 ret = sxgbe_mdio_register(ndev);
2176 if (ret < 0) {
2177 netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
2178 __func__, priv->plat->bus_id);
2179 goto error_mdio_register;
2180 }
2181
2182 ret = register_netdev(ndev);
2183 if (ret) {
2184 pr_err("%s: ERROR %i registering the device\n", __func__, ret);
2185 goto error_netdev_register;
2186 }
2187
2188 sxgbe_check_ether_addr(priv);
2189
2190 return priv;
2191
2192error_netdev_register:
2193 sxgbe_mdio_unregister(ndev);
2194error_mdio_register:
2195 clk_put(priv->sxgbe_clk);
2196error_clk_get:
2197 netif_napi_del(&priv->napi);
2198error_free_netdev:
2199 free_netdev(ndev);
2200 return NULL;
2201}
2202
2203/**
2204 * sxgbe_drv_remove
2205 * @ndev: net device pointer
2206 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
2207 * changes the link status and releases the DMA descriptor rings.
2208 */
2209int sxgbe_drv_remove(struct net_device *ndev)
2210{
2211 struct sxgbe_priv_data *priv = netdev_priv(ndev);
2212
2213 netdev_info(ndev, "%s: removing driver\n", __func__);
2214
2215 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
2216 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
2217
2218 priv->hw->mac->enable_tx(priv->ioaddr, false);
2219 priv->hw->mac->enable_rx(priv->ioaddr, false);
2220
2221 netif_napi_del(&priv->napi);
2222
2223 sxgbe_mdio_unregister(ndev);
2224
2225 unregister_netdev(ndev);
2226
2227 free_netdev(ndev);
2228
2229 return 0;
2230}
2231
2232#ifdef CONFIG_PM
2233int sxgbe_suspend(struct net_device *ndev)
2234{
2235 return 0;
2236}
2237
2238int sxgbe_resume(struct net_device *ndev)
2239{
2240 return 0;
2241}
2242
2243int sxgbe_freeze(struct net_device *ndev)
2244{
2245 return -ENOSYS;
2246}
2247
2248int sxgbe_restore(struct net_device *ndev)
2249{
2250 return -ENOSYS;
2251}
2252#endif /* CONFIG_PM */
2253
2254/* Driver is configured as Platform driver */
2255static int __init sxgbe_init(void)
2256{
2257 int ret;
2258
2259 ret = sxgbe_register_platform();
2260 if (ret)
2261 goto err;
2262 return 0;
2263err:
2264 pr_err("driver registration failed\n");
2265 return ret;
2266}
2267
2268static void __exit sxgbe_exit(void)
2269{
2270 sxgbe_unregister_platform();
2271}
2272
2273module_init(sxgbe_init);
2274module_exit(sxgbe_exit);
2275
2276#ifndef MODULE
2277static int __init sxgbe_cmdline_opt(char *str)
2278{
2279 char *opt;
2280
2281 if (!str || !*str)
2282 return -EINVAL;
2283 while ((opt = strsep(&str, ",")) != NULL) {
2284 if (!strncmp(opt, "eee_timer:", 10)) {
2285 if (kstrtoint(opt + 10, 0, &eee_timer))
2286 goto err;
2287 }
2288 }
2289 return 0;
2290
2291err:
2292 pr_err("%s: ERROR broken module parameter conversion\n", __func__);
2293 return -EINVAL;
2294}
2295
2296__setup("sxgbeeth=", sxgbe_cmdline_opt);
2297#endif /* MODULE */
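For example, with the driver built in, booting with "sxgbeeth=eee_timer:2000" on the kernel command line makes this handler parse the option and set eee_timer to 2000.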
2298
2299
2300
2301MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");
2302
2303MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
2304MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
2305
2306MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
2307MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
2308MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
2309MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");
2310
2311MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
new file mode 100644
index 000000000000..b0eb0a2c52ca
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -0,0 +1,251 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/io.h>
16#include <linux/mii.h>
17#include <linux/netdevice.h>
18#include <linux/platform_device.h>
19#include <linux/phy.h>
20#include <linux/slab.h>
21#include <linux/sxgbe_platform.h>
22
23#include "sxgbe_common.h"
24#include "sxgbe_reg.h"
25
26#define SXGBE_SMA_WRITE_CMD 0x01 /* write command */
27#define SXGBE_SMA_PREAD_CMD 0x02 /* post-read increment address */
28#define SXGBE_SMA_READ_CMD 0x03 /* read command */
29#define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */
30#define SXGBE_MII_BUSY 0x00800000 /* mii busy */
31
32static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
33{
34 unsigned long fin_time = jiffies + 3 * HZ; /* 3 seconds */
35
36 while (!time_after(jiffies, fin_time)) {
37 if (!(readl(ioaddr + mii_data) & SXGBE_MII_BUSY))
38 return 0;
39 cpu_relax();
40 }
41
42 return -EBUSY;
43}
44
45static void sxgbe_mdio_ctrl_data(struct sxgbe_priv_data *sp, u32 cmd,
46 u16 phydata)
47{
48 u32 reg = phydata;
49
50 reg |= (cmd << 16) | SXGBE_SMA_SKIP_ADDRFRM |
51 ((sp->clk_csr & 0x7) << 19) | SXGBE_MII_BUSY;
52 writel(reg, sp->ioaddr + sp->hw->mii.data);
53}
54
55static void sxgbe_mdio_c45(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
56 int phyreg, u16 phydata)
57{
58 u32 reg;
59
60 /* set mdio address register */
61 reg = ((phyreg >> 16) & 0x1f) << 21;
62 reg |= (phyaddr << 16) | (phyreg & 0xffff);
63 writel(reg, sp->ioaddr + sp->hw->mii.addr);
64
65 sxgbe_mdio_ctrl_data(sp, cmd, phydata);
66}
67
68static void sxgbe_mdio_c22(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
69 int phyreg, u16 phydata)
70{
71 u32 reg;
72
73 writel(1 << phyaddr, sp->ioaddr + SXGBE_MDIO_CLAUSE22_PORT_REG);
74
75 /* set mdio address register */
76 reg = (phyaddr << 16) | (phyreg & 0x1f);
77 writel(reg, sp->ioaddr + sp->hw->mii.addr);
78
79 sxgbe_mdio_ctrl_data(sp, cmd, phydata);
80}
81
82static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
83 int phyreg, u16 phydata)
84{
85 const struct mii_regs *mii = &sp->hw->mii;
86 int rc;
87
88 rc = sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
89 if (rc < 0)
90 return rc;
91
92 if (phyreg & MII_ADDR_C45) {
93 sxgbe_mdio_c45(sp, cmd, phyaddr, phyreg, phydata);
94 } else {
95 /* Ports 0-3 only support C22. */
96 if (phyaddr >= 4)
97 return -ENODEV;
98
99 sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata);
100 }
101
102 return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
103}
104
105/**
106 * sxgbe_mdio_read
107 * @bus: points to the mii_bus structure
108 * @phyaddr: address of phy port
109 * @phyreg: address of the register within the phy
110 * Description: this function is used for C45 and C22 MDIO read
111 */
112static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
113{
114 struct net_device *ndev = bus->priv;
115 struct sxgbe_priv_data *priv = netdev_priv(ndev);
116 int rc;
117
118 rc = sxgbe_mdio_access(priv, SXGBE_SMA_READ_CMD, phyaddr, phyreg, 0);
119 if (rc < 0)
120 return rc;
121
122 return readl(priv->ioaddr + priv->hw->mii.data) & 0xffff;
123}
124
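A caller encodes a Clause 45 access by setting the MII_ADDR_C45 flag (bit 30 in the kernel convention of this era) and placing the MMD device address in bits 20:16 of phyreg; sxgbe_mdio_c45() then unpacks it as shown above. A standalone sketch of the encoding, with arbitrary example device and register numbers:

        #include <stdio.h>

        #define MII_ADDR_C45 (1 << 30) /* kernel flag marking a Clause 45 access */

        int main(void)
        {
                int devad = 1, regnum = 1, phyaddr = 0; /* arbitrary example values */
                int phyreg = MII_ADDR_C45 | (devad << 16) | regnum;
                unsigned int reg;

                /* same unpacking as sxgbe_mdio_c45() */
                reg = ((phyreg >> 16) & 0x1f) << 21;
                reg |= (phyaddr << 16) | (phyreg & 0xffff);
                printf("mii address register = 0x%08x\n", reg);
                return 0;
        }
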
125/**
126 * sxgbe_mdio_write
127 * @bus: points to the mii_bus structure
128 * @phyaddr: address of phy port
129 * @phyreg: address of phy registers
130 * @phydata: data to be written into phy register
131 * Description: this function is used for C45 and C22 MDIO write
132 */
133static int sxgbe_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
134 u16 phydata)
135{
136 struct net_device *ndev = bus->priv;
137 struct sxgbe_priv_data *priv = netdev_priv(ndev);
138
139 return sxgbe_mdio_access(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg,
140 phydata);
141}
142
143int sxgbe_mdio_register(struct net_device *ndev)
144{
145 struct mii_bus *mdio_bus;
146 struct sxgbe_priv_data *priv = netdev_priv(ndev);
147 struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
148 int err, phy_addr;
149 int *irqlist;
150 bool act, phy_found = false;
151
152 /* allocate the new mdio bus */
153 mdio_bus = mdiobus_alloc();
154 if (!mdio_bus) {
155 netdev_err(ndev, "%s: mii bus allocation failed\n", __func__);
156 return -ENOMEM;
157 }
158
159 if (mdio_data->irqs)
160 irqlist = mdio_data->irqs;
161 else
162 irqlist = priv->mii_irq;
163
164 /* assign mii bus fields */
165 mdio_bus->name = "samsxgbe";
166 mdio_bus->read = &sxgbe_mdio_read;
167 mdio_bus->write = &sxgbe_mdio_write;
168 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
169 mdio_bus->name, priv->plat->bus_id);
170 mdio_bus->priv = ndev;
171 mdio_bus->phy_mask = mdio_data->phy_mask;
172 mdio_bus->parent = priv->device;
173
174 /* register with kernel subsystem */
175 err = mdiobus_register(mdio_bus);
176 if (err != 0) {
177 netdev_err(ndev, "mdiobus register failed\n");
178 goto mdiobus_err;
179 }
180
181 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
182 struct phy_device *phy = mdio_bus->phy_map[phy_addr];
183
184 if (phy) {
185 char irq_num[4];
186 char *irq_str;
187 /* If an IRQ was provided to be assigned after
188 * the bus probe, do it here.
189 */
190 if ((mdio_data->irqs == NULL) &&
191 (mdio_data->probed_phy_irq > 0)) {
192 irqlist[phy_addr] = mdio_data->probed_phy_irq;
193 phy->irq = mdio_data->probed_phy_irq;
194 }
195
196 /* If we're going to bind the MAC to this PHY bus,
197 * and no PHY number was provided to the MAC,
198 * use the one probed here.
199 */
200 if (priv->plat->phy_addr == -1)
201 priv->plat->phy_addr = phy_addr;
202
203 act = (priv->plat->phy_addr == phy_addr);
204 switch (phy->irq) {
205 case PHY_POLL:
206 irq_str = "POLL";
207 break;
208 case PHY_IGNORE_INTERRUPT:
209 irq_str = "IGNORE";
210 break;
211 default:
212 sprintf(irq_num, "%d", phy->irq);
213 irq_str = irq_num;
214 break;
215 }
216 netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
217 phy->phy_id, phy_addr, irq_str,
218 dev_name(&phy->dev), act ? " active" : "");
219 phy_found = true;
220 }
221 }
222
223 if (!phy_found) {
224 netdev_err(ndev, "PHY not found\n");
225 err = -ENODEV;
226 goto phyfound_err;
227 }
228 priv->mii = mdio_bus;
229 return 0;
230
231phyfound_err:
232 mdiobus_unregister(mdio_bus);
233mdiobus_err:
234 mdiobus_free(mdio_bus);
235 return err;
236}
237
238int sxgbe_mdio_unregister(struct net_device *ndev)
239{
240 struct sxgbe_priv_data *priv = netdev_priv(ndev);
241
242 if (!priv->mii)
243 return 0;
244
245 mdiobus_unregister(priv->mii);
246 priv->mii->priv = NULL;
247 mdiobus_free(priv->mii);
248 priv->mii = NULL;
249
250 return 0;
251}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
new file mode 100644
index 000000000000..324681c2bb74
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
@@ -0,0 +1,254 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/io.h>
16#include <linux/errno.h>
17#include <linux/export.h>
18#include <linux/jiffies.h>
19
20#include "sxgbe_mtl.h"
21#include "sxgbe_reg.h"
22
23static void sxgbe_mtl_init(void __iomem *ioaddr, unsigned int etsalg,
24 unsigned int raa)
25{
26 u32 reg_val;
27
28 reg_val = readl(ioaddr + SXGBE_MTL_OP_MODE_REG);
29 reg_val &= ETS_RST;
30
31 /* ETS Algorithm */
32 switch (etsalg & SXGBE_MTL_OPMODE_ESTMASK) {
33 case ETS_WRR:
34 reg_val &= ETS_WRR;
35 break;
36 case ETS_WFQ:
37 reg_val |= ETS_WFQ;
38 break;
39 case ETS_DWRR:
40 reg_val |= ETS_DWRR;
41 break;
42 }
43 writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
44
45 switch (raa & SXGBE_MTL_OPMODE_RAAMASK) {
46 case RAA_SP:
47 reg_val &= RAA_SP;
48 break;
49 case RAA_WSP:
50 reg_val |= RAA_WSP;
51 break;
52 }
53 writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
54}
55
56/* For Dynamic DMA channel mapping for Rx queue */
57static void sxgbe_mtl_dma_dm_rxqueue(void __iomem *ioaddr)
58{
59 writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP0_REG);
60 writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP1_REG);
61 writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP2_REG);
62}
63
64static void sxgbe_mtl_set_txfifosize(void __iomem *ioaddr, int queue_num,
65 int queue_fifo)
66{
67 u32 fifo_bits, reg_val;
68
69 /* 0 means 256 bytes */
70 fifo_bits = (queue_fifo / SXGBE_MTL_TX_FIFO_DIV) - 1;
71 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
72 reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
73 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
74}
75
76static void sxgbe_mtl_set_rxfifosize(void __iomem *ioaddr, int queue_num,
77 int queue_fifo)
78{
79 u32 fifo_bits, reg_val;
80
81 /* 0 means 256 bytes */
82 fifo_bits = (queue_fifo / SXGBE_MTL_RX_FIFO_DIV) - 1;
83 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
84 reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
85 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
86}
87
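The FIFO size fields are encoded in 256-byte units minus one ("0 means 256 bytes"), so for example a 4096-byte per-queue FIFO is programmed as 15. A standalone check of that encoding:

        #include <stdio.h>

        #define FIFO_DIV 256 /* SXGBE_MTL_TX_FIFO_DIV / SXGBE_MTL_RX_FIFO_DIV */

        int main(void)
        {
                int queue_fifo = 4096;                       /* bytes per queue */
                int fifo_bits = (queue_fifo / FIFO_DIV) - 1; /* -> 15           */

                printf("%d bytes -> field value %d\n", queue_fifo, fifo_bits);
                return 0;
        }
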
88static void sxgbe_mtl_enable_txqueue(void __iomem *ioaddr, int queue_num)
89{
90 u32 reg_val;
91
92 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
93 reg_val |= SXGBE_MTL_ENABLE_QUEUE;
94 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
95}
96
97static void sxgbe_mtl_disable_txqueue(void __iomem *ioaddr, int queue_num)
98{
99 u32 reg_val;
100
101 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
102 reg_val &= ~SXGBE_MTL_ENABLE_QUEUE;
103 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
104}
105
106static void sxgbe_mtl_fc_active(void __iomem *ioaddr, int queue_num,
107 int threshold)
108{
109 u32 reg_val;
110
111 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
112 reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_ACTIVE);
113 reg_val |= (threshold << RX_FC_ACTIVE);
114
115 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
116}
117
118static void sxgbe_mtl_fc_enable(void __iomem *ioaddr, int queue_num)
119{
120 u32 reg_val;
121
122 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
123 reg_val |= SXGBE_MTL_ENABLE_FC;
124 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
125}
126
127static void sxgbe_mtl_fc_deactive(void __iomem *ioaddr, int queue_num,
128 int threshold)
129{
130 u32 reg_val;
131
132 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
133 reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_DEACTIVE);
134 reg_val |= (threshold << RX_FC_DEACTIVE);
135
136 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
137}
138
139static void sxgbe_mtl_fep_enable(void __iomem *ioaddr, int queue_num)
140{
141 u32 reg_val;
142
143 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
144 reg_val |= SXGBE_MTL_RXQ_OP_FEP;
145
146 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
147}
148
149static void sxgbe_mtl_fep_disable(void __iomem *ioaddr, int queue_num)
150{
151 u32 reg_val;
152
153 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
154 reg_val &= ~(SXGBE_MTL_RXQ_OP_FEP);
155
156 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
157}
158
159static void sxgbe_mtl_fup_enable(void __iomem *ioaddr, int queue_num)
160{
161 u32 reg_val;
162
163 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
164 reg_val |= SXGBE_MTL_RXQ_OP_FUP;
165
166 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
167}
168
169static void sxgbe_mtl_fup_disable(void __iomem *ioaddr, int queue_num)
170{
171 u32 reg_val;
172
173 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
174 reg_val &= ~(SXGBE_MTL_RXQ_OP_FUP);
175
176 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
177}
178
179
180static void sxgbe_set_tx_mtl_mode(void __iomem *ioaddr, int queue_num,
181 int tx_mode)
182{
183 u32 reg_val;
184
185 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
186 /* TX specific MTL mode settings */
187 if (tx_mode == SXGBE_MTL_SFMODE) {
188 reg_val |= SXGBE_MTL_SFMODE;
189 } else {
190 /* set the TTC values */
191 if (tx_mode <= 64)
192 reg_val |= MTL_CONTROL_TTC_64;
193 else if (tx_mode <= 96)
194 reg_val |= MTL_CONTROL_TTC_96;
195 else if (tx_mode <= 128)
196 reg_val |= MTL_CONTROL_TTC_128;
197 else if (tx_mode <= 192)
198 reg_val |= MTL_CONTROL_TTC_192;
199 else if (tx_mode <= 256)
200 reg_val |= MTL_CONTROL_TTC_256;
201 else if (tx_mode <= 384)
202 reg_val |= MTL_CONTROL_TTC_384;
203 else
204 reg_val |= MTL_CONTROL_TTC_512;
205 }
206
207 /* write into TXQ operation register */
208 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
209}
210
211static void sxgbe_set_rx_mtl_mode(void __iomem *ioaddr, int queue_num,
212 int rx_mode)
213{
214 u32 reg_val;
215
216 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
217 /* RX specific MTL mode settings */
218 if (rx_mode == SXGBE_RX_MTL_SFMODE) {
219 reg_val |= SXGBE_RX_MTL_SFMODE;
220 } else {
221 if (rx_mode <= 64)
222 reg_val |= MTL_CONTROL_RTC_64;
223 else if (rx_mode <= 96)
224 reg_val |= MTL_CONTROL_RTC_96;
225 else if (rx_mode <= 128)
226 reg_val |= MTL_CONTROL_RTC_128;
227 }
228
229 /* write into RXQ operation register */
230 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
231}
232
233static const struct sxgbe_mtl_ops mtl_ops = {
234 .mtl_set_txfifosize = sxgbe_mtl_set_txfifosize,
235 .mtl_set_rxfifosize = sxgbe_mtl_set_rxfifosize,
236 .mtl_enable_txqueue = sxgbe_mtl_enable_txqueue,
237 .mtl_disable_txqueue = sxgbe_mtl_disable_txqueue,
238 .mtl_dynamic_dma_rxqueue = sxgbe_mtl_dma_dm_rxqueue,
239 .set_tx_mtl_mode = sxgbe_set_tx_mtl_mode,
240 .set_rx_mtl_mode = sxgbe_set_rx_mtl_mode,
241 .mtl_init = sxgbe_mtl_init,
242 .mtl_fc_active = sxgbe_mtl_fc_active,
243 .mtl_fc_deactive = sxgbe_mtl_fc_deactive,
244 .mtl_fc_enable = sxgbe_mtl_fc_enable,
245 .mtl_fep_enable = sxgbe_mtl_fep_enable,
246 .mtl_fep_disable = sxgbe_mtl_fep_disable,
247 .mtl_fup_enable = sxgbe_mtl_fup_enable,
248 .mtl_fup_disable = sxgbe_mtl_fup_disable
249};
250
251const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void)
252{
253 return &mtl_ops;
254}
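
For reference, a minimal usage sketch of the ops table above, assuming "ioaddr" and "txq_count" come from the driver's sxgbe_priv_data; example_mtl_setup() and the chosen thresholds are hypothetical, not part of the driver:

/* Hedged sketch: drive the MTL hooks through the ops table.
 * "ioaddr"/"txq_count" are assumed to come from sxgbe_priv_data.
 */
static void example_mtl_setup(void __iomem *ioaddr, int txq_count)
{
	const struct sxgbe_mtl_ops *ops = sxgbe_get_mtl_ops();
	int q;

	/* store-and-forward mode on every TX queue */
	for (q = 0; q < txq_count; q++) {
		ops->mtl_enable_txqueue(ioaddr, q);
		ops->set_tx_mtl_mode(ioaddr, q, SXGBE_MTL_SFMODE);
	}

	/* RX queue 0: enable flow control and program the
	 * activate/deactivate thresholds (values illustrative)
	 */
	ops->mtl_fc_enable(ioaddr, 0);
	ops->mtl_fc_active(ioaddr, 0, MTL_FC_FULL_8K);
	ops->mtl_fc_deactive(ioaddr, 0, MTL_FC_FULL_4K);
}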
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h
new file mode 100644
index 000000000000..7e4810c4137e
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h
@@ -0,0 +1,104 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_MTL_H__
13#define __SXGBE_MTL_H__
14
15#define SXGBE_MTL_OPMODE_ESTMASK 0x3
16#define SXGBE_MTL_OPMODE_RAAMASK 0x1
17#define SXGBE_MTL_FCMASK 0x7
18#define SXGBE_MTL_TX_FIFO_DIV 256
19#define SXGBE_MTL_RX_FIFO_DIV 256
20
21#define SXGBE_MTL_RXQ_OP_FEP BIT(4)
22#define SXGBE_MTL_RXQ_OP_FUP BIT(3)
23#define SXGBE_MTL_ENABLE_FC 0x80
24
25#define ETS_WRR 0xFFFFFF9F
26#define ETS_RST 0xFFFFFF9F
27#define ETS_WFQ 0x00000020
28#define ETS_DWRR 0x00000040
29#define RAA_SP 0xFFFFFFFB
30#define RAA_WSP 0x00000004
31
32#define RX_QUEUE_DYNAMIC 0x80808080
33#define RX_FC_ACTIVE 8
34#define RX_FC_DEACTIVE 13
35
36enum ttc_control {
37 MTL_CONTROL_TTC_64 = 0x00000000,
38 MTL_CONTROL_TTC_96 = 0x00000020,
39 MTL_CONTROL_TTC_128 = 0x00000030,
40 MTL_CONTROL_TTC_192 = 0x00000040,
41 MTL_CONTROL_TTC_256 = 0x00000050,
42 MTL_CONTROL_TTC_384 = 0x00000060,
43 MTL_CONTROL_TTC_512 = 0x00000070,
44};
45
46enum rtc_control {
47 MTL_CONTROL_RTC_64 = 0x00000000,
48 MTL_CONTROL_RTC_96 = 0x00000002,
49 MTL_CONTROL_RTC_128 = 0x00000003,
50};
51
52enum flow_control_th {
53 MTL_FC_FULL_1K = 0x00000000,
54 MTL_FC_FULL_2K = 0x00000001,
55 MTL_FC_FULL_4K = 0x00000002,
56 MTL_FC_FULL_5K = 0x00000003,
57 MTL_FC_FULL_6K = 0x00000004,
58 MTL_FC_FULL_8K = 0x00000005,
59 MTL_FC_FULL_16K = 0x00000006,
60 MTL_FC_FULL_24K = 0x00000007,
61};
62
63struct sxgbe_mtl_ops {
64 void (*mtl_init)(void __iomem *ioaddr, unsigned int etsalg,
65 unsigned int raa);
66
67 void (*mtl_set_txfifosize)(void __iomem *ioaddr, int queue_num,
68 int mtl_fifo);
69
70 void (*mtl_set_rxfifosize)(void __iomem *ioaddr, int queue_num,
71 int queue_fifo);
72
73 void (*mtl_enable_txqueue)(void __iomem *ioaddr, int queue_num);
74
75 void (*mtl_disable_txqueue)(void __iomem *ioaddr, int queue_num);
76
77 void (*set_tx_mtl_mode)(void __iomem *ioaddr, int queue_num,
78 int tx_mode);
79
80 void (*set_rx_mtl_mode)(void __iomem *ioaddr, int queue_num,
81 int rx_mode);
82
83 void (*mtl_dynamic_dma_rxqueue)(void __iomem *ioaddr);
84
85 void (*mtl_fc_active)(void __iomem *ioaddr, int queue_num,
86 int threshold);
87
88 void (*mtl_fc_deactive)(void __iomem *ioaddr, int queue_num,
89 int threshold);
90
91 void (*mtl_fc_enable)(void __iomem *ioaddr, int queue_num);
92
93 void (*mtl_fep_enable)(void __iomem *ioaddr, int queue_num);
94
95 void (*mtl_fep_disable)(void __iomem *ioaddr, int queue_num);
96
97 void (*mtl_fup_enable)(void __iomem *ioaddr, int queue_num);
98
99 void (*mtl_fup_disable)(void __iomem *ioaddr, int queue_num);
100};
101
102const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void);
103
104#endif /* __SXGBE_MTL_H__ */
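
A note on the threshold encoding behind set_tx_mtl_mode(): any tx_mode other than SXGBE_MTL_SFMODE is interpreted as a byte count and rounded up to the next ttc_control step. A hedged sketch (example_tx_mode() is hypothetical; "ioaddr" is assumed to be the mapped register base):

/* Hedged sketch: threshold vs store-and-forward selection. A byte
 * count of 150 falls into the "<= 192" bucket of
 * sxgbe_set_tx_mtl_mode(), so the queue is programmed with
 * MTL_CONTROL_TTC_192.
 */
static void example_tx_mode(void __iomem *ioaddr, int queue_num, bool use_sf)
{
	const struct sxgbe_mtl_ops *ops = sxgbe_get_mtl_ops();

	if (use_sf)
		ops->set_tx_mtl_mode(ioaddr, queue_num, SXGBE_MTL_SFMODE);
	else
		ops->set_tx_mtl_mode(ioaddr, queue_num, 150);
}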
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
new file mode 100644
index 000000000000..94c2cd73d4a9
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -0,0 +1,259 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/etherdevice.h>
16#include <linux/io.h>
17#include <linux/module.h>
18#include <linux/netdevice.h>
19#include <linux/of.h>
20#include <linux/of_irq.h>
21#include <linux/of_net.h>
22#include <linux/phy.h>
23#include <linux/platform_device.h>
24#include <linux/sxgbe_platform.h>
25
26#include "sxgbe_common.h"
27#include "sxgbe_reg.h"
28
29#ifdef CONFIG_OF
30static int sxgbe_probe_config_dt(struct platform_device *pdev,
31 struct sxgbe_plat_data *plat,
32 const char **mac)
33{
34 struct device_node *np = pdev->dev.of_node;
35 struct sxgbe_dma_cfg *dma_cfg;
36
37 if (!np)
38 return -ENODEV;
39
40 *mac = of_get_mac_address(np);
41 plat->interface = of_get_phy_mode(np);
42
43 plat->bus_id = of_alias_get_id(np, "ethernet");
44 if (plat->bus_id < 0)
45 plat->bus_id = 0;
46
 47	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
 48					   sizeof(*plat->mdio_bus_data),
 49					   GFP_KERNEL);
 50	if (!plat->mdio_bus_data)
 51		return -ENOMEM;
 52
51 dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
52 if (!dma_cfg)
53 return -ENOMEM;
54
55 plat->dma_cfg = dma_cfg;
56 of_property_read_u32(np, "samsung,pbl", &dma_cfg->pbl);
57 if (of_property_read_u32(np, "samsung,burst-map", &dma_cfg->burst_map) == 0)
58 dma_cfg->fixed_burst = true;
59
60 return 0;
61}
62#else
63static int sxgbe_probe_config_dt(struct platform_device *pdev,
64 struct sxgbe_plat_data *plat,
65 const char **mac)
66{
67 return -ENOSYS;
68}
69#endif /* CONFIG_OF */
70
71/**
72 * sxgbe_platform_probe
73 * @pdev: platform device pointer
74 * Description: platform_device probe function. It allocates
 75 * the necessary resources and invokes the main driver probe
 76 * to init the net device, register the mdio bus etc.
77 */
78static int sxgbe_platform_probe(struct platform_device *pdev)
79{
80 int ret;
81 int i, chan;
82 struct resource *res;
83 struct device *dev = &pdev->dev;
84 void __iomem *addr;
85 struct sxgbe_priv_data *priv = NULL;
86 struct sxgbe_plat_data *plat_dat = NULL;
87 const char *mac = NULL;
89 struct device_node *node = dev->of_node;
90
91 /* Get memory resource */
92 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
93 if (!res)
94 goto err_out;
95
96 addr = devm_ioremap_resource(dev, res);
97 if (IS_ERR(addr))
98 return PTR_ERR(addr);
99
100 if (pdev->dev.of_node) {
101 plat_dat = devm_kzalloc(&pdev->dev,
102 sizeof(struct sxgbe_plat_data),
103 GFP_KERNEL);
104 if (!plat_dat)
105 return -ENOMEM;
106
107 ret = sxgbe_probe_config_dt(pdev, plat_dat, &mac);
108 if (ret) {
109 pr_err("%s: main dt probe failed\n", __func__);
110 return ret;
111 }
112 }
113
114	priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr);
115	if (!priv) {
116		pr_err("%s: main driver probe failed\n", __func__);
117		goto err_out;
118	}
119
120	/* Get MAC address if available (DT) */
121	if (mac)
122		ether_addr_copy(priv->dev->dev_addr, mac);
123
124 /* Get the SXGBE common INT information */
125 priv->irq = irq_of_parse_and_map(node, 0);
126 if (priv->irq <= 0) {
127 dev_err(dev, "sxgbe common irq parsing failed\n");
128 goto err_drv_remove;
129 }
130
131 /* Get the TX/RX IRQ numbers */
132 for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
133 priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
134 if (priv->txq[i]->irq_no <= 0) {
135 dev_err(dev, "sxgbe tx irq parsing failed\n");
136 goto err_tx_irq_unmap;
137 }
138 }
139
140 for (i = 0; i < SXGBE_RX_QUEUES; i++) {
141 priv->rxq[i]->irq_no = irq_of_parse_and_map(node, chan++);
142 if (priv->rxq[i]->irq_no <= 0) {
143 dev_err(dev, "sxgbe rx irq parsing failed\n");
144 goto err_rx_irq_unmap;
145 }
146 }
147
148 priv->lpi_irq = irq_of_parse_and_map(node, chan);
149 if (priv->lpi_irq <= 0) {
150 dev_err(dev, "sxgbe lpi irq parsing failed\n");
151 goto err_rx_irq_unmap;
152 }
153
154 platform_set_drvdata(pdev, priv->dev);
155
156 pr_debug("platform driver registration completed\n");
157
158 return 0;
159
160err_rx_irq_unmap:
161	while (i--)
162 irq_dispose_mapping(priv->rxq[i]->irq_no);
163 i = SXGBE_TX_QUEUES;
164err_tx_irq_unmap:
165	while (i--)
166 irq_dispose_mapping(priv->txq[i]->irq_no);
167 irq_dispose_mapping(priv->irq);
168err_drv_remove:
169	sxgbe_drv_remove(priv->dev);
170err_out:
171 return -ENODEV;
172}
173
174/**
175 * sxgbe_platform_remove
176 * @pdev: platform device pointer
177 * Description: this function calls the main driver remove function
178 * to free the net resources and releases the platform resources (e.g. mem).
179 */
180static int sxgbe_platform_remove(struct platform_device *pdev)
181{
182 struct net_device *ndev = platform_get_drvdata(pdev);
183 int ret = sxgbe_drv_remove(ndev);
184
185 return ret;
186}
187
188#ifdef CONFIG_PM
189static int sxgbe_platform_suspend(struct device *dev)
190{
191 struct net_device *ndev = dev_get_drvdata(dev);
192
193 return sxgbe_suspend(ndev);
194}
195
196static int sxgbe_platform_resume(struct device *dev)
197{
198 struct net_device *ndev = dev_get_drvdata(dev);
199
200 return sxgbe_resume(ndev);
201}
202
203static int sxgbe_platform_freeze(struct device *dev)
204{
205 struct net_device *ndev = dev_get_drvdata(dev);
206
207 return sxgbe_freeze(ndev);
208}
209
210static int sxgbe_platform_restore(struct device *dev)
211{
212 struct net_device *ndev = dev_get_drvdata(dev);
213
214 return sxgbe_restore(ndev);
215}
216
217static const struct dev_pm_ops sxgbe_platform_pm_ops = {
218 .suspend = sxgbe_platform_suspend,
219 .resume = sxgbe_platform_resume,
220 .freeze = sxgbe_platform_freeze,
221 .thaw = sxgbe_platform_restore,
222 .restore = sxgbe_platform_restore,
223};
224#else
225static const struct dev_pm_ops sxgbe_platform_pm_ops;
226#endif /* CONFIG_PM */
227
228static const struct of_device_id sxgbe_dt_ids[] = {
229 { .compatible = "samsung,sxgbe-v2.0a"},
230 { /* sentinel */ }
231};
232MODULE_DEVICE_TABLE(of, sxgbe_dt_ids);
233
234struct platform_driver sxgbe_platform_driver = {
235 .probe = sxgbe_platform_probe,
236 .remove = sxgbe_platform_remove,
237 .driver = {
238 .name = SXGBE_RESOURCE_NAME,
239 .owner = THIS_MODULE,
240 .pm = &sxgbe_platform_pm_ops,
241 .of_match_table = of_match_ptr(sxgbe_dt_ids),
242 },
243};
244
245int sxgbe_register_platform(void)
246{
247 int err;
248
249 err = platform_driver_register(&sxgbe_platform_driver);
250 if (err)
251 pr_err("failed to register the platform driver\n");
252
253 return err;
254}
255
256void sxgbe_unregister_platform(void)
257{
258 platform_driver_unregister(&sxgbe_platform_driver);
259}
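
For context, a hedged sketch of how the two helpers above would typically be wired into the module lifecycle; in this patch set the actual calls are made from sxgbe_main.c, so the example_* names below are illustrative only:

/* Hedged sketch: typical module wiring for the platform helpers.
 * The real driver performs this in sxgbe_main.c.
 */
static int __init example_sxgbe_init(void)
{
	return sxgbe_register_platform();
}

static void __exit example_sxgbe_exit(void)
{
	sxgbe_unregister_platform();
}

module_init(example_sxgbe_init);
module_exit(example_sxgbe_exit);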
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
new file mode 100644
index 000000000000..5a89acb4c505
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -0,0 +1,488 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_REGMAP_H__
13#define __SXGBE_REGMAP_H__
14
15/* SXGBE MAC Registers */
16#define SXGBE_CORE_TX_CONFIG_REG 0x0000
17#define SXGBE_CORE_RX_CONFIG_REG 0x0004
18#define SXGBE_CORE_PKT_FILTER_REG 0x0008
19#define SXGBE_CORE_WATCHDOG_TIMEOUT_REG 0x000C
20#define SXGBE_CORE_HASH_TABLE_REG0 0x0010
21#define SXGBE_CORE_HASH_TABLE_REG1 0x0014
22#define SXGBE_CORE_HASH_TABLE_REG2 0x0018
23#define SXGBE_CORE_HASH_TABLE_REG3 0x001C
24#define SXGBE_CORE_HASH_TABLE_REG4 0x0020
25#define SXGBE_CORE_HASH_TABLE_REG5 0x0024
26#define SXGBE_CORE_HASH_TABLE_REG6 0x0028
27#define SXGBE_CORE_HASH_TABLE_REG7 0x002C
28
29/* EEE-LPI Registers */
30#define SXGBE_CORE_LPI_CTRL_STATUS 0x00D0
31#define SXGBE_CORE_LPI_TIMER_CTRL 0x00D4
32
33/* VLAN Specific Registers */
34#define SXGBE_CORE_VLAN_TAG_REG 0x0050
35#define SXGBE_CORE_VLAN_HASHTAB_REG 0x0058
36#define SXGBE_CORE_VLAN_INSCTL_REG 0x0060
37#define SXGBE_CORE_VLAN_INNERCTL_REG 0x0064
38#define SXGBE_CORE_RX_ETHTYPE_MATCH_REG 0x006C
39
40/* Flow Control Registers */
41#define SXGBE_CORE_TX_Q0_FLOWCTL_REG 0x0070
42#define SXGBE_CORE_TX_Q1_FLOWCTL_REG 0x0074
43#define SXGBE_CORE_TX_Q2_FLOWCTL_REG 0x0078
44#define SXGBE_CORE_TX_Q3_FLOWCTL_REG 0x007C
45#define SXGBE_CORE_TX_Q4_FLOWCTL_REG 0x0080
46#define SXGBE_CORE_TX_Q5_FLOWCTL_REG 0x0084
47#define SXGBE_CORE_TX_Q6_FLOWCTL_REG 0x0088
48#define SXGBE_CORE_TX_Q7_FLOWCTL_REG 0x008C
49#define SXGBE_CORE_RX_FLOWCTL_REG 0x0090
50#define SXGBE_CORE_RX_CTL0_REG 0x00A0
51#define SXGBE_CORE_RX_CTL1_REG 0x00A4
52#define SXGBE_CORE_RX_CTL2_REG 0x00A8
53#define SXGBE_CORE_RX_CTL3_REG 0x00AC
54
55/* Interrupt Registers */
56#define SXGBE_CORE_INT_STATUS_REG 0x00B0
57#define SXGBE_CORE_INT_ENABLE_REG 0x00B4
58#define SXGBE_CORE_RXTX_ERR_STATUS_REG 0x00B8
59#define SXGBE_CORE_PMT_CTL_STATUS_REG 0x00C0
60#define SXGBE_CORE_RWK_PKT_FILTER_REG 0x00C4
61#define SXGBE_CORE_VERSION_REG 0x0110
62#define SXGBE_CORE_DEBUG_REG 0x0114
63#define SXGBE_CORE_HW_FEA_REG(index)	(0x011C + ((index) * 4))
64
65/* SMA(MDIO) module registers */
66#define SXGBE_MDIO_SCMD_ADD_REG 0x0200
67#define SXGBE_MDIO_SCMD_DATA_REG 0x0204
68#define SXGBE_MDIO_CCMD_WADD_REG 0x0208
69#define SXGBE_MDIO_CCMD_WDATA_REG 0x020C
70#define SXGBE_MDIO_CSCAN_PORT_REG 0x0210
71#define SXGBE_MDIO_INT_STATUS_REG 0x0214
72#define SXGBE_MDIO_INT_ENABLE_REG 0x0218
73#define SXGBE_MDIO_PORT_CONDCON_REG 0x021C
74#define SXGBE_MDIO_CLAUSE22_PORT_REG 0x0220
75
76/* port specific, addr = 0-3 */
77#define SXGBE_MDIO_DEV_BASE_REG 0x0230
78#define SXGBE_MDIO_PORT_DEV_REG(addr) \
79 (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x0)
80#define SXGBE_MDIO_PORT_LSTATUS_REG(addr) \
81 (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x4)
82#define SXGBE_MDIO_PORT_ALIVE_REG(addr) \
83 (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x8)
84
85#define SXGBE_CORE_GPIO_CTL_REG 0x0278
86#define SXGBE_CORE_GPIO_STATUS_REG 0x027C
87
88/* Address registers for filtering */
89#define SXGBE_CORE_ADD_BASE_REG 0x0300
90
91/* addr = 0-31 */
92#define SXGBE_CORE_ADD_HIGHOFFSET(addr) \
93 (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x0)
94#define SXGBE_CORE_ADD_LOWOFFSET(addr) \
95 (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x4)
96
97/* SXGBE MMC registers */
98#define SXGBE_MMC_CTL_REG 0x0800
99#define SXGBE_MMC_RXINT_STATUS_REG 0x0804
100#define SXGBE_MMC_TXINT_STATUS_REG 0x0808
101#define SXGBE_MMC_RXINT_ENABLE_REG 0x080C
102#define SXGBE_MMC_TXINT_ENABLE_REG 0x0810
103
104/* TX specific counters */
105#define SXGBE_MMC_TXOCTETHI_GBCNT_REG 0x0814
106#define SXGBE_MMC_TXOCTETLO_GBCNT_REG 0x0818
107#define SXGBE_MMC_TXFRAMELO_GBCNT_REG 0x081C
108#define SXGBE_MMC_TXFRAMEHI_GBCNT_REG 0x0820
109#define SXGBE_MMC_TXBROADLO_GCNT_REG 0x0824
110#define SXGBE_MMC_TXBROADHI_GCNT_REG 0x0828
111#define SXGBE_MMC_TXMULTILO_GCNT_REG 0x082C
112#define SXGBE_MMC_TXMULTIHI_GCNT_REG 0x0830
113#define SXGBE_MMC_TX64LO_GBCNT_REG 0x0834
114#define SXGBE_MMC_TX64HI_GBCNT_REG 0x0838
115#define SXGBE_MMC_TX65TO127LO_GBCNT_REG 0x083C
116#define SXGBE_MMC_TX65TO127HI_GBCNT_REG 0x0840
117#define SXGBE_MMC_TX128TO255LO_GBCNT_REG 0x0844
118#define SXGBE_MMC_TX128TO255HI_GBCNT_REG 0x0848
119#define SXGBE_MMC_TX256TO511LO_GBCNT_REG 0x084C
120#define SXGBE_MMC_TX256TO511HI_GBCNT_REG 0x0850
121#define SXGBE_MMC_TX512TO1023LO_GBCNT_REG 0x0854
122#define SXGBE_MMC_TX512TO1023HI_GBCNT_REG 0x0858
123#define SXGBE_MMC_TX1023TOMAXLO_GBCNT_REG 0x085C
124#define SXGBE_MMC_TX1023TOMAXHI_GBCNT_REG 0x0860
125#define SXGBE_MMC_TXUNICASTLO_GBCNT_REG 0x0864
126#define SXGBE_MMC_TXUNICASTHI_GBCNT_REG 0x0868
127#define SXGBE_MMC_TXMULTILO_GBCNT_REG 0x086C
128#define SXGBE_MMC_TXMULTIHI_GBCNT_REG 0x0870
129#define SXGBE_MMC_TXBROADLO_GBCNT_REG 0x0874
130#define SXGBE_MMC_TXBROADHI_GBCNT_REG 0x0878
131#define SXGBE_MMC_TXUFLWLO_GBCNT_REG 0x087C
132#define SXGBE_MMC_TXUFLWHI_GBCNT_REG 0x0880
133#define SXGBE_MMC_TXOCTETLO_GCNT_REG 0x0884
134#define SXGBE_MMC_TXOCTETHI_GCNT_REG 0x0888
135#define SXGBE_MMC_TXFRAMELO_GCNT_REG 0x088C
136#define SXGBE_MMC_TXFRAMEHI_GCNT_REG 0x0890
137#define SXGBE_MMC_TXPAUSELO_CNT_REG 0x0894
138#define SXGBE_MMC_TXPAUSEHI_CNT_REG 0x0898
139#define SXGBE_MMC_TXVLANLO_GCNT_REG 0x089C
140#define SXGBE_MMC_TXVLANHI_GCNT_REG 0x08A0
141
142/* RX specific counters */
143#define SXGBE_MMC_RXFRAMELO_GBCNT_REG 0x0900
144#define SXGBE_MMC_RXFRAMEHI_GBCNT_REG 0x0904
145#define SXGBE_MMC_RXOCTETLO_GBCNT_REG 0x0908
146#define SXGBE_MMC_RXOCTETHI_GBCNT_REG 0x090C
147#define SXGBE_MMC_RXOCTETLO_GCNT_REG 0x0910
148#define SXGBE_MMC_RXOCTETHI_GCNT_REG 0x0914
149#define SXGBE_MMC_RXBROADLO_GCNT_REG 0x0918
150#define SXGBE_MMC_RXBROADHI_GCNT_REG 0x091C
151#define SXGBE_MMC_RXMULTILO_GCNT_REG 0x0920
152#define SXGBE_MMC_RXMULTIHI_GCNT_REG 0x0924
153#define SXGBE_MMC_RXCRCERRLO_REG 0x0928
154#define SXGBE_MMC_RXCRCERRHI_REG 0x092C
155#define SXGBE_MMC_RXSHORT64BFRAME_ERR_REG 0x0930
156#define SXGBE_MMC_RXJABBERERR_REG 0x0934
157#define SXGBE_MMC_RXSHORT64BFRAME_COR_REG 0x0938
158#define SXGBE_MMC_RXOVERMAXFRAME_COR_REG 0x093C
159#define SXGBE_MMC_RX64LO_GBCNT_REG 0x0940
160#define SXGBE_MMC_RX64HI_GBCNT_REG 0x0944
161#define SXGBE_MMC_RX65TO127LO_GBCNT_REG 0x0948
162#define SXGBE_MMC_RX65TO127HI_GBCNT_REG 0x094C
163#define SXGBE_MMC_RX128TO255LO_GBCNT_REG 0x0950
164#define SXGBE_MMC_RX128TO255HI_GBCNT_REG 0x0954
165#define SXGBE_MMC_RX256TO511LO_GBCNT_REG 0x0958
166#define SXGBE_MMC_RX256TO511HI_GBCNT_REG 0x095C
167#define SXGBE_MMC_RX512TO1023LO_GBCNT_REG 0x0960
168#define SXGBE_MMC_RX512TO1023HI_GBCNT_REG 0x0964
169#define SXGBE_MMC_RX1023TOMAXLO_GBCNT_REG 0x0968
170#define SXGBE_MMC_RX1023TOMAXHI_GBCNT_REG 0x096C
171#define SXGBE_MMC_RXUNICASTLO_GCNT_REG 0x0970
172#define SXGBE_MMC_RXUNICASTHI_GCNT_REG 0x0974
173#define SXGBE_MMC_RXLENERRLO_REG 0x0978
174#define SXGBE_MMC_RXLENERRHI_REG 0x097C
175#define SXGBE_MMC_RXOUTOFRANGETYPELO_REG 0x0980
176#define SXGBE_MMC_RXOUTOFRANGETYPEHI_REG 0x0984
177#define SXGBE_MMC_RXPAUSELO_CNT_REG 0x0988
178#define SXGBE_MMC_RXPAUSEHI_CNT_REG 0x098C
179#define SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG 0x0990
180#define SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG 0x0994
181#define SXGBE_MMC_RXVLANLO_GBCNT_REG 0x0998
182#define SXGBE_MMC_RXVLANHI_GBCNT_REG 0x099C
183#define SXGBE_MMC_RXWATCHDOG_ERR_REG 0x09A0
184
185/* L3/L4 function registers */
186#define SXGBE_CORE_L34_ADDCTL_REG	0x0C00
188#define SXGBE_CORE_L34_DATA_REG 0x0C04
189
190/* ARP registers */
191#define SXGBE_CORE_ARP_ADD_REG 0x0C10
192
193/* RSS registers */
194#define SXGBE_CORE_RSS_CTL_REG 0x0C80
195#define SXGBE_CORE_RSS_ADD_REG 0x0C88
196#define SXGBE_CORE_RSS_DATA_REG 0x0C8C
197
198/* RSS control register bits */
199#define SXGBE_CORE_RSS_CTL_UDP4TE BIT(3)
200#define SXGBE_CORE_RSS_CTL_TCP4TE BIT(2)
201#define SXGBE_CORE_RSS_CTL_IP2TE BIT(1)
202#define SXGBE_CORE_RSS_CTL_RSSE BIT(0)
203
204/* IEEE 1588 registers */
205#define SXGBE_CORE_TSTAMP_CTL_REG 0x0D00
206#define SXGBE_CORE_SUBSEC_INC_REG 0x0D04
207#define SXGBE_CORE_SYSTIME_SEC_REG 0x0D0C
208#define SXGBE_CORE_SYSTIME_NSEC_REG 0x0D10
209#define SXGBE_CORE_SYSTIME_SECUP_REG 0x0D14
210#define SXGBE_CORE_TSTAMP_ADD_REG 0x0D18
211#define SXGBE_CORE_SYSTIME_HWORD_REG 0x0D1C
212#define SXGBE_CORE_TSTAMP_STATUS_REG 0x0D20
213#define SXGBE_CORE_TXTIME_STATUSNSEC_REG 0x0D30
214#define SXGBE_CORE_TXTIME_STATUSSEC_REG 0x0D34
215
216/* Auxiliary registers */
217#define SXGBE_CORE_AUX_CTL_REG 0x0D40
218#define SXGBE_CORE_AUX_TSTAMP_NSEC_REG 0x0D48
219#define SXGBE_CORE_AUX_TSTAMP_SEC_REG 0x0D4C
220#define SXGBE_CORE_AUX_TSTAMP_INGCOR_REG 0x0D50
221#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_REG 0x0D54
222#define SXGBE_CORE_AUX_TSTAMP_INGCOR_NSEC_REG 0x0D58
223#define SXGBE_CORE_AUX_TSTAMP_INGCOR_SUBNSEC_REG 0x0D5C
224#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_NSEC_REG 0x0D60
225#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_SUBNSEC_REG 0x0D64
226
227/* PPS registers */
228#define SXGBE_CORE_PPS_CTL_REG 0x0D70
229#define SXGBE_CORE_PPS_BASE 0x0D80
230
231/* addr = 0 - 3 */
232#define SXGBE_CORE_PPS_TTIME_SEC_REG(addr) \
233 (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x0)
234#define SXGBE_CORE_PPS_TTIME_NSEC_REG(addr) \
235 (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x4)
236#define SXGBE_CORE_PPS_INTERVAL_REG(addr) \
237 (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x8)
238#define SXGBE_CORE_PPS_WIDTH_REG(addr) \
239 (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0xC)
240#define SXGBE_CORE_PTO_CTL_REG 0x0DC0
241#define SXGBE_CORE_SRCPORT_ITY0_REG 0x0DC4
242#define SXGBE_CORE_SRCPORT_ITY1_REG 0x0DC8
243#define SXGBE_CORE_SRCPORT_ITY2_REG 0x0DCC
244#define SXGBE_CORE_LOGMSG_LEVEL_REG 0x0DD0
245
246/* SXGBE MTL Registers */
247#define SXGBE_MTL_BASE_REG 0x1000
248#define SXGBE_MTL_OP_MODE_REG (SXGBE_MTL_BASE_REG + 0x0000)
249#define SXGBE_MTL_DEBUG_CTL_REG (SXGBE_MTL_BASE_REG + 0x0008)
250#define SXGBE_MTL_DEBUG_STATUS_REG (SXGBE_MTL_BASE_REG + 0x000C)
251#define SXGBE_MTL_FIFO_DEBUGDATA_REG (SXGBE_MTL_BASE_REG + 0x0010)
252#define SXGBE_MTL_INT_STATUS_REG (SXGBE_MTL_BASE_REG + 0x0020)
253#define SXGBE_MTL_RXQ_DMAMAP0_REG (SXGBE_MTL_BASE_REG + 0x0030)
254#define SXGBE_MTL_RXQ_DMAMAP1_REG (SXGBE_MTL_BASE_REG + 0x0034)
255#define SXGBE_MTL_RXQ_DMAMAP2_REG (SXGBE_MTL_BASE_REG + 0x0038)
256#define SXGBE_MTL_TX_PRTYMAP0_REG (SXGBE_MTL_BASE_REG + 0x0040)
257#define SXGBE_MTL_TX_PRTYMAP1_REG (SXGBE_MTL_BASE_REG + 0x0044)
258
259/* TC/Queue registers, qnum=0-15 */
260#define SXGBE_MTL_TC_TXBASE_REG (SXGBE_MTL_BASE_REG + 0x0100)
261#define SXGBE_MTL_TXQ_OPMODE_REG(qnum) \
262 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x00)
263#define SXGBE_MTL_SFMODE BIT(1)
264#define SXGBE_MTL_FIFO_LSHIFT 16
265#define SXGBE_MTL_ENABLE_QUEUE 0x00000008
266#define SXGBE_MTL_TXQ_UNDERFLOW_REG(qnum) \
267 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x04)
268#define SXGBE_MTL_TXQ_DEBUG_REG(qnum) \
269 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x08)
270#define SXGBE_MTL_TXQ_ETSCTL_REG(qnum) \
271 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x10)
272#define SXGBE_MTL_TXQ_ETSSTATUS_REG(qnum) \
273 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x14)
274#define SXGBE_MTL_TXQ_QUANTWEIGHT_REG(qnum) \
275 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x18)
276
277#define SXGBE_MTL_TC_RXBASE_REG 0x1140
278#define SXGBE_RX_MTL_SFMODE BIT(5)
279#define SXGBE_MTL_RXQ_OPMODE_REG(qnum) \
280 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x00)
281#define SXGBE_MTL_RXQ_MISPKTOVERFLOW_REG(qnum) \
282 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x04)
283#define SXGBE_MTL_RXQ_DEBUG_REG(qnum) \
284 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x08)
285#define SXGBE_MTL_RXQ_CTL_REG(qnum) \
286 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x0C)
287#define SXGBE_MTL_RXQ_INTENABLE_REG(qnum) \
288 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x30)
289#define SXGBE_MTL_RXQ_INTSTATUS_REG(qnum) \
290 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x34)
291
292/* SXGBE DMA Registers */
293#define SXGBE_DMA_BASE_REG 0x3000
294#define SXGBE_DMA_MODE_REG (SXGBE_DMA_BASE_REG + 0x0000)
295#define SXGBE_DMA_SOFT_RESET BIT(0)
296#define SXGBE_DMA_SYSBUS_MODE_REG (SXGBE_DMA_BASE_REG + 0x0004)
297#define SXGBE_DMA_AXI_UNDEF_BURST BIT(0)
298#define SXGBE_DMA_ENHACE_ADDR_MODE BIT(11)
299#define SXGBE_DMA_INT_STATUS_REG (SXGBE_DMA_BASE_REG + 0x0008)
300#define SXGBE_DMA_AXI_ARCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0010)
301#define SXGBE_DMA_AXI_AWCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0018)
302#define SXGBE_DMA_DEBUG_STATUS0_REG (SXGBE_DMA_BASE_REG + 0x0020)
303#define SXGBE_DMA_DEBUG_STATUS1_REG (SXGBE_DMA_BASE_REG + 0x0024)
304#define SXGBE_DMA_DEBUG_STATUS2_REG (SXGBE_DMA_BASE_REG + 0x0028)
305#define SXGBE_DMA_DEBUG_STATUS3_REG (SXGBE_DMA_BASE_REG + 0x002C)
306#define SXGBE_DMA_DEBUG_STATUS4_REG (SXGBE_DMA_BASE_REG + 0x0030)
307#define SXGBE_DMA_DEBUG_STATUS5_REG (SXGBE_DMA_BASE_REG + 0x0034)
308
309/* Channel Registers, cha_num = 0-15 */
310#define SXGBE_DMA_CHA_BASE_REG \
311 (SXGBE_DMA_BASE_REG + 0x0100)
312#define SXGBE_DMA_CHA_CTL_REG(cha_num) \
313 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x00)
314#define SXGBE_DMA_PBL_X8MODE BIT(16)
315#define SXGBE_DMA_CHA_TXCTL_TSE_ENABLE BIT(12)
316#define SXGBE_DMA_CHA_TXCTL_REG(cha_num) \
317 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x04)
318#define SXGBE_DMA_CHA_RXCTL_REG(cha_num) \
319 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x08)
320#define SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num) \
321 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x10)
322#define SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num) \
323 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x14)
324#define SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num) \
325 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x18)
326#define SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num) \
327 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x1C)
328#define SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num) \
329 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x24)
330#define SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num) \
331 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x2C)
332#define SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num) \
333 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x30)
334#define SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num) \
335 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x34)
336#define SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num) \
337 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x38)
338#define SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(cha_num) \
339 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x3C)
340#define SXGBE_DMA_CHA_TXDESC_CURADDLO_REG(cha_num) \
341 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x44)
342#define SXGBE_DMA_CHA_RXDESC_CURADDLO_REG(cha_num) \
343 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x4C)
344#define SXGBE_DMA_CHA_CURTXBUF_ADDHI_REG(cha_num) \
345 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x50)
346#define SXGBE_DMA_CHA_CURTXBUF_ADDLO_REG(cha_num) \
347 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x54)
348#define SXGBE_DMA_CHA_CURRXBUF_ADDHI_REG(cha_num) \
349 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x58)
350#define SXGBE_DMA_CHA_CURRXBUF_ADDLO_REG(cha_num) \
351 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x5C)
352#define SXGBE_DMA_CHA_STATUS_REG(cha_num) \
353 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x60)
354
355/* TX DMA control register specific */
356#define SXGBE_TX_START_DMA BIT(0)
357
358/* sxgbe tx configuration register bitfields */
359#define SXGBE_SPEED_10G 0x0
360#define SXGBE_SPEED_2_5G 0x1
361#define SXGBE_SPEED_1G 0x2
362#define SXGBE_SPEED_LSHIFT 29
363
364#define SXGBE_TX_ENABLE BIT(0)
365#define SXGBE_TX_DISDIC_ALGO BIT(1)
366#define SXGBE_TX_JABBER_DISABLE BIT(16)
367
368/* sxgbe rx configuration register bitfields */
369#define SXGBE_RX_ENABLE BIT(0)
370#define SXGBE_RX_ACS_ENABLE BIT(1)
371#define SXGBE_RX_WATCHDOG_DISABLE BIT(7)
372#define SXGBE_RX_JUMBPKT_ENABLE BIT(8)
373#define SXGBE_RX_CSUMOFFLOAD_ENABLE BIT(9)
374#define SXGBE_RX_LOOPBACK_ENABLE BIT(10)
375#define SXGBE_RX_ARPOFFLOAD_ENABLE BIT(31)
376
377/* sxgbe vlan Tag Register bitfields */
378#define SXGBE_VLAN_SVLAN_ENABLE BIT(18)
379#define SXGBE_VLAN_DOUBLEVLAN_ENABLE BIT(26)
380#define SXGBE_VLAN_INNERVLAN_ENABLE BIT(27)
381
382/* XMAC VLAN Tag Inclusion Register (0x0060) bitfields.
383 * The fields below also apply to the Inner VLAN Tag
384 * Inclusion Register (0x0064).
385 */
386enum vlan_tag_ctl_tx {
387 VLAN_TAG_TX_NOP,
388 VLAN_TAG_TX_DEL,
389 VLAN_TAG_TX_INSERT,
390 VLAN_TAG_TX_REPLACE
391};
392#define SXGBE_VLAN_PRTY_CTL BIT(18)
393#define SXGBE_VLAN_CSVL_CTL BIT(19)
394
395/* SXGBE TX Q Flow Control Register bitfields */
396#define SXGBE_TX_FLOW_CTL_FCB BIT(0)
397#define SXGBE_TX_FLOW_CTL_TFB BIT(1)
398
399/* SXGBE RX Q Flow Control Register bitfields */
400#define SXGBE_RX_FLOW_CTL_ENABLE BIT(0)
401#define SXGBE_RX_UNICAST_DETECT BIT(1)
402#define SXGBE_RX_PRTYFLOW_CTL_ENABLE BIT(8)
403
404/* sxgbe rx Q control0 register bitfields */
405#define SXGBE_RX_Q_ENABLE 0x2
406
407/* SXGBE hardware features bitfield specific */
408/* Capability Register 0 */
409#define SXGBE_HW_FEAT_GMII(cap) ((cap & 0x00000002) >> 1)
410#define SXGBE_HW_FEAT_VLAN_HASH_FILTER(cap) ((cap & 0x00000010) >> 4)
411#define SXGBE_HW_FEAT_SMA(cap) ((cap & 0x00000020) >> 5)
412#define SXGBE_HW_FEAT_PMT_TEMOTE_WOP(cap) ((cap & 0x00000040) >> 6)
413#define SXGBE_HW_FEAT_PMT_MAGIC_PKT(cap) ((cap & 0x00000080) >> 7)
414#define SXGBE_HW_FEAT_RMON(cap) ((cap & 0x00000100) >> 8)
415#define SXGBE_HW_FEAT_ARP_OFFLOAD(cap) ((cap & 0x00000200) >> 9)
416#define SXGBE_HW_FEAT_IEEE1500_2008(cap) ((cap & 0x00001000) >> 12)
417#define SXGBE_HW_FEAT_EEE(cap) ((cap & 0x00002000) >> 13)
418#define SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(cap) ((cap & 0x00004000) >> 14)
419#define SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(cap) ((cap & 0x00010000) >> 16)
420#define SXGBE_HW_FEAT_MACADDR_COUNT(cap) ((cap & 0x007C0000) >> 18)
421#define SXGBE_HW_FEAT_TSTMAP_SRC(cap) ((cap & 0x06000000) >> 25)
422#define SXGBE_HW_FEAT_SRCADDR_VLAN(cap) ((cap & 0x08000000) >> 27)
423
424/* Capability Register 1 */
425#define SXGBE_HW_FEAT_RX_FIFO_SIZE(cap) ((cap & 0x0000001F))
426#define SXGBE_HW_FEAT_TX_FIFO_SIZE(cap) ((cap & 0x000007C0) >> 6)
427#define SXGBE_HW_FEAT_IEEE1588_HWORD(cap) ((cap & 0x00002000) >> 13)
428#define SXGBE_HW_FEAT_DCB(cap) ((cap & 0x00010000) >> 16)
429#define SXGBE_HW_FEAT_SPLIT_HDR(cap) ((cap & 0x00020000) >> 17)
430#define SXGBE_HW_FEAT_TSO(cap) ((cap & 0x00040000) >> 18)
431#define SXGBE_HW_FEAT_DEBUG_MEM_IFACE(cap) ((cap & 0x00080000) >> 19)
432#define SXGBE_HW_FEAT_RSS(cap) ((cap & 0x00100000) >> 20)
433#define SXGBE_HW_FEAT_HASH_TABLE_SIZE(cap) ((cap & 0x03000000) >> 24)
434#define SXGBE_HW_FEAT_L3L4_FILTER_NUM(cap) ((cap & 0x78000000) >> 27)
435
436/* Capability Register 2 */
437#define SXGBE_HW_FEAT_RX_MTL_QUEUES(cap) ((cap & 0x0000000F))
438#define SXGBE_HW_FEAT_TX_MTL_QUEUES(cap) ((cap & 0x000003C0) >> 6)
439#define SXGBE_HW_FEAT_RX_DMA_CHANNELS(cap) ((cap & 0x0000F000) >> 12)
440#define SXGBE_HW_FEAT_TX_DMA_CHANNELS(cap) ((cap & 0x003C0000) >> 18)
441#define SXGBE_HW_FEAT_PPS_OUTPUTS(cap) ((cap & 0x07000000) >> 24)
442#define SXGBE_HW_FEAT_AUX_SNAPSHOTS(cap) ((cap & 0x70000000) >> 28)
443
444/* DMA channel interrupt enable specific */
445/* DMA Normal interrupt */
446#define SXGBE_DMA_INT_ENA_NIE BIT(16) /* Normal Summary */
447#define SXGBE_DMA_INT_ENA_TIE BIT(0) /* Transmit Interrupt */
448#define SXGBE_DMA_INT_ENA_TUE BIT(2) /* Transmit Buffer Unavailable */
449#define SXGBE_DMA_INT_ENA_RIE BIT(6) /* Receive Interrupt */
450
451#define SXGBE_DMA_INT_NORMAL \
452 (SXGBE_DMA_INT_ENA_NIE | SXGBE_DMA_INT_ENA_RIE | \
453 SXGBE_DMA_INT_ENA_TIE | SXGBE_DMA_INT_ENA_TUE)
454
455/* DMA Abnormal interrupt */
456#define SXGBE_DMA_INT_ENA_AIE BIT(15) /* Abnormal Summary */
457#define SXGBE_DMA_INT_ENA_TSE BIT(1) /* Transmit Stopped */
458#define SXGBE_DMA_INT_ENA_RUE BIT(7) /* Receive Buffer Unavailable */
459#define SXGBE_DMA_INT_ENA_RSE BIT(8) /* Receive Stopped */
460#define SXGBE_DMA_INT_ENA_FBE BIT(12) /* Fatal Bus Error */
461#define SXGBE_DMA_INT_ENA_CDEE BIT(13) /* Context Descriptor Error */
462
463#define SXGBE_DMA_INT_ABNORMAL \
464 (SXGBE_DMA_INT_ENA_AIE | SXGBE_DMA_INT_ENA_TSE | \
465 SXGBE_DMA_INT_ENA_RUE | SXGBE_DMA_INT_ENA_RSE | \
466 SXGBE_DMA_INT_ENA_FBE | SXGBE_DMA_INT_ENA_CDEE)
467
468#define SXGBE_DMA_ENA_INT (SXGBE_DMA_INT_NORMAL | SXGBE_DMA_INT_ABNORMAL)
469
470/* DMA channel interrupt status specific */
471#define SXGBE_DMA_INT_STATUS_REB2 BIT(21)
472#define SXGBE_DMA_INT_STATUS_REB1 BIT(20)
473#define SXGBE_DMA_INT_STATUS_REB0 BIT(19)
474#define SXGBE_DMA_INT_STATUS_TEB2 BIT(18)
475#define SXGBE_DMA_INT_STATUS_TEB1 BIT(17)
476#define SXGBE_DMA_INT_STATUS_TEB0 BIT(16)
477#define SXGBE_DMA_INT_STATUS_NIS BIT(15)
478#define SXGBE_DMA_INT_STATUS_AIS BIT(14)
479#define SXGBE_DMA_INT_STATUS_CTXTERR BIT(13)
480#define SXGBE_DMA_INT_STATUS_FBE BIT(12)
481#define SXGBE_DMA_INT_STATUS_RPS BIT(8)
482#define SXGBE_DMA_INT_STATUS_RBU BIT(7)
483#define SXGBE_DMA_INT_STATUS_RI BIT(6)
484#define SXGBE_DMA_INT_STATUS_TBU BIT(2)
485#define SXGBE_DMA_INT_STATUS_TPS BIT(1)
486#define SXGBE_DMA_INT_STATUS_TI BIT(0)
487
488#endif /* __SXGBE_REGMAP_H__ */
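
The SXGBE_HW_FEAT_* accessors above are plain mask-and-shift decoders over the words read from SXGBE_CORE_HW_FEA_REG(0..2). A hedged sketch of how a probe path might consume them (example_get_hw_features() and its locals are hypothetical):

/* Hedged sketch: decode capability words 0 and 1 via the
 * SXGBE_HW_FEAT_* accessors. "ioaddr" is the mapped register base.
 */
static void example_get_hw_features(void __iomem *ioaddr)
{
	u32 cap0 = readl(ioaddr + SXGBE_CORE_HW_FEA_REG(0));
	u32 cap1 = readl(ioaddr + SXGBE_CORE_HW_FEA_REG(1));
	int has_tso = SXGBE_HW_FEAT_TSO(cap1);
	int has_rss = SXGBE_HW_FEAT_RSS(cap1);
	int naddr = SXGBE_HW_FEAT_MACADDR_COUNT(cap0);

	pr_info("tso:%d rss:%d mac-addrs:%d\n", has_tso, has_rss, naddr);
}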
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
new file mode 100644
index 000000000000..51c32194ba88
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
@@ -0,0 +1,91 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/bitops.h>
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/phy.h>
16#include "sxgbe_common.h"
17#include "sxgbe_xpcs.h"
18
19static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg)
20{
21 u32 value;
22 struct sxgbe_priv_data *priv = netdev_priv(ndev);
23
24 value = readl(priv->ioaddr + XPCS_OFFSET + reg);
25
26 return value;
27}
28
29static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data)
30{
31 struct sxgbe_priv_data *priv = netdev_priv(ndev);
32
33 writel(data, priv->ioaddr + XPCS_OFFSET + reg);
34
35 return 0;
36}
37
38int sxgbe_xpcs_init(struct net_device *ndev)
39{
40 u32 value;
41
42 value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
43 /* 10G XAUI mode */
44 sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
45 sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
46 sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13));
47 sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
48
49 do {
50 value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
51 } while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE);
52
53 value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
54 sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
55
56 do {
57 value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
58 } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
59
60 return 0;
61}
62
63int sxgbe_xpcs_init_1G(struct net_device *ndev)
64{
65 int value;
66
67 /* 10GBASE-X PCS (1G) mode */
68 sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
69 sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
70 value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
71 sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13));
72
73 value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
74 sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6));
75 sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13));
76 value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
77 sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
78
79 do {
80 value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
81 } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
82
83 value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
84 sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
85
86	/* Auto-negotiation clause 37 enable */
87 value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
88 sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12));
89
90 return 0;
91}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
new file mode 100644
index 000000000000..6b26a50724d3
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
@@ -0,0 +1,38 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Byungho An <bh74.an@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_XPCS_H__
13#define __SXGBE_XPCS_H__
14
15/* XPCS Registers */
16#define XPCS_OFFSET 0x1A060000
17#define SR_PCS_MMD_CONTROL1 0x030000
18#define SR_PCS_CONTROL2 0x030007
19#define VR_PCS_MMD_XAUI_MODE_CONTROL 0x038004
20#define VR_PCS_MMD_DIGITAL_STATUS 0x038010
21#define SR_MII_MMD_CONTROL 0x1F0000
22#define SR_MII_MMD_AN_ADV 0x1F0004
23#define SR_MII_MMD_AN_LINK_PARTNER_BA 0x1F0005
24#define VR_MII_MMD_AN_CONTROL 0x1F8001
25#define VR_MII_MMD_AN_INT_STATUS 0x1F8002
26
27#define XPCS_QSEQ_STATE_STABLE 0x10
28#define XPCS_QSEQ_STATE_MPLLOFF 0x1c
29#define XPCS_TYPE_SEL_R 0x00
30#define XPCS_TYPE_SEL_X 0x01
31#define XPCS_TYPE_SEL_W 0x02
32#define XPCS_XAUI_MODE 0x00
33#define XPCS_RXAUI_MODE 0x01
34
35int sxgbe_xpcs_init(struct net_device *ndev);
36int sxgbe_xpcs_init_1G(struct net_device *ndev);
37
38#endif /* __SXGBE_XPCS_H__ */
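
A hedged usage note: sxgbe_xpcs_init() programs the 10G XAUI path, while sxgbe_xpcs_init_1G() selects the 10GBASE-X (1G) path and enables clause 37 auto-negotiation; both poll VR_PCS_MMD_DIGITAL_STATUS until the sequencer settles. A caller might dispatch on link speed (example_pcs_setup() is hypothetical; SPEED_* come from the ethtool UAPI):

/* Hedged sketch: choose the PCS programming path by speed. */
static int example_pcs_setup(struct net_device *ndev, int speed)
{
	if (speed == SPEED_10000)
		return sxgbe_xpcs_init(ndev);
	if (speed == SPEED_1000)
		return sxgbe_xpcs_init_1G(ndev);
	return -EINVAL;
}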
diff --git a/include/linux/sxgbe_platform.h b/include/linux/sxgbe_platform.h
new file mode 100644
index 000000000000..a62442cf0037
--- /dev/null
+++ b/include/linux/sxgbe_platform.h
@@ -0,0 +1,54 @@
1/*
2 * 10G controller driver for Samsung EXYNOS SoCs
3 *
4 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 *
7 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13#ifndef __SXGBE_PLATFORM_H__
14#define __SXGBE_PLATFORM_H__
15
16/* MDC Clock Selection defines */
17#define SXGBE_CSR_100_150M 0x0 /* MDC = clk_scr_i/62 */
18#define SXGBE_CSR_150_250M 0x1 /* MDC = clk_scr_i/102 */
19#define SXGBE_CSR_250_300M 0x2 /* MDC = clk_scr_i/122 */
20#define SXGBE_CSR_300_350M 0x3 /* MDC = clk_scr_i/142 */
21#define SXGBE_CSR_350_400M 0x4 /* MDC = clk_scr_i/162 */
22#define SXGBE_CSR_400_500M 0x5 /* MDC = clk_scr_i/202 */
23
24/* Platform data for the platform device structure's
25 * platform_data field
26 */
27struct sxgbe_mdio_bus_data {
28 unsigned int phy_mask;
29 int *irqs;
30 int probed_phy_irq;
31};
32
33struct sxgbe_dma_cfg {
34 int pbl;
35 int fixed_burst;
36 int burst_map;
37 int adv_addr_mode;
38};
39
40struct sxgbe_plat_data {
41 char *phy_bus_name;
42 int bus_id;
43 int phy_addr;
44 int interface;
45 struct sxgbe_mdio_bus_data *mdio_bus_data;
46 struct sxgbe_dma_cfg *dma_cfg;
47 int clk_csr;
48 int pmt;
49 int force_sf_dma_mode;
50 int force_thresh_dma_mode;
51 int riwt_off;
52};
53
54#endif /* __SXGBE_PLATFORM_H__ */
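
On a non-DT platform, the structure above would be supplied through the platform device's platform_data field; a hedged board-file sketch with purely illustrative values:

/* Hedged sketch: static platform data for a hypothetical non-DT
 * board. All values are illustrative, not taken from real hardware.
 */
static struct sxgbe_dma_cfg example_dma_cfg = {
	.pbl		= 8,
	.fixed_burst	= 1,
	.burst_map	= 0x7,
};

static struct sxgbe_plat_data example_plat_data = {
	.bus_id		= 0,
	.phy_addr	= 0,
	.interface	= PHY_INTERFACE_MODE_XGMII,
	.dma_cfg	= &example_dma_cfg,
	.clk_csr	= SXGBE_CSR_250_300M,
};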