author		Siva Reddy <siva.kallam@samsung.com>	2014-03-25 15:10:54 -0400
committer	David S. Miller <davem@davemloft.net>	2014-03-26 16:49:31 -0400
commit		1edb9ca69e8a7988900fc0283e10550b5592164d (patch)
tree		268691e0c432357fb1a55ff35cb215263a4db576 /drivers/net/ethernet
parent		5221d3e66d74e2c90cd9f94acfd957da1ab1df4d (diff)
net: sxgbe: add basic framework for Samsung 10Gb ethernet driver
This patch adds support for the Samsung 10Gb ethernet driver (sxgbe):

- sxgbe core initialization
- Tx and Rx support
- MDIO support
- ISRs for Tx and Rx
- ifconfig support to driver

Signed-off-by: Siva Reddy Kallam <siva.kallam@samsung.com>
Signed-off-by: Vipul Pandya <vipul.pandya@samsung.com>
Signed-off-by: Girish K S <ks.giri@samsung.com>
Neatening-by: Joe Perches <joe@perches.com>
Signed-off-by: Byungho An <bh74.an@samsung.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--	drivers/net/ethernet/Kconfig	1
-rw-r--r--	drivers/net/ethernet/Makefile	1
-rw-r--r--	drivers/net/ethernet/samsung/Kconfig	16
-rw-r--r--	drivers/net/ethernet/samsung/Makefile	5
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/Kconfig	9
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/Makefile	4
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h	462
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c	158
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c	515
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h	298
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c	372
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h	48
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c	44
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c	2052
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c	251
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c	254
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h	104
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c	253
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h	477
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c	91
-rw-r--r--	drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h	38
21 files changed, 5453 insertions, 0 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 39484b534f5e..39b26fe28d10 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -150,6 +150,7 @@ config S6GMAC
150 To compile this driver as a module, choose M here. The module
151 will be called s6gmac.
152
153source "drivers/net/ethernet/samsung/Kconfig"
154source "drivers/net/ethernet/seeq/Kconfig"
155source "drivers/net/ethernet/silan/Kconfig"
156source "drivers/net/ethernet/sis/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index adf61af507f7..545d0b3b9cb4 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
61obj-$(CONFIG_SH_ETH) += renesas/
62obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
63obj-$(CONFIG_S6GMAC) += s6gmac.o
64obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
65obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
66obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
67obj-$(CONFIG_NET_VENDOR_SIS) += sis/
diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig
new file mode 100644
index 000000000000..7902341f2623
--- /dev/null
+++ b/drivers/net/ethernet/samsung/Kconfig
@@ -0,0 +1,16 @@
1#
2# Samsung Ethernet device configuration
3#
4
5config NET_VENDOR_SAMSUNG
6 bool "Samsung Ethernet device"
7 default y
8 ---help---
9 This is the driver for the SXGBE 10G Ethernet IP block found on Samsung
10 platforms.
11
12if NET_VENDOR_SAMSUNG
13
14source "drivers/net/ethernet/samsung/sxgbe/Kconfig"
15
16endif # NET_VENDOR_SAMSUNG
diff --git a/drivers/net/ethernet/samsung/Makefile b/drivers/net/ethernet/samsung/Makefile
new file mode 100644
index 000000000000..1773c29b8d76
--- /dev/null
+++ b/drivers/net/ethernet/samsung/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the Samsung Ethernet device drivers.
3#
4
5obj-$(CONFIG_SXGBE_ETH) += sxgbe/
diff --git a/drivers/net/ethernet/samsung/sxgbe/Kconfig b/drivers/net/ethernet/samsung/sxgbe/Kconfig
new file mode 100644
index 000000000000..d79288c51d0a
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/Kconfig
@@ -0,0 +1,9 @@
1config SXGBE_ETH
2 tristate "Samsung 10G/2.5G/1G SXGBE Ethernet driver"
3 depends on HAS_IOMEM && HAS_DMA
4 select PHYLIB
5 select CRC32
6 select PTP_1588_CLOCK
7 ---help---
8 This is the driver for the SXGBE 10G Ethernet IP block found on Samsung
9 platforms.
diff --git a/drivers/net/ethernet/samsung/sxgbe/Makefile b/drivers/net/ethernet/samsung/sxgbe/Makefile
new file mode 100644
index 000000000000..dcc80b9d4370
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o
2samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \
3 sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o sxgbe_mdio.o \
4 sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
new file mode 100644
index 000000000000..c7803f199967
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -0,0 +1,462 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef __SXGBE_COMMON_H__
14#define __SXGBE_COMMON_H__
15
16/* forward references */
17struct sxgbe_desc_ops;
18struct sxgbe_dma_ops;
19struct sxgbe_mtl_ops;
20
21#define SXGBE_RESOURCE_NAME "sam_sxgbeeth"
22#define DRV_MODULE_VERSION "November_2013"
23
24/* MAX HW feature words */
25#define SXGBE_HW_WORDS 3
26
27#define SXGBE_RX_COE_NONE 0
28
29/* CSR Frequency Access Defines*/
30#define SXGBE_CSR_F_150M 150000000
31#define SXGBE_CSR_F_250M 250000000
32#define SXGBE_CSR_F_300M 300000000
33#define SXGBE_CSR_F_350M 350000000
34#define SXGBE_CSR_F_400M 400000000
35#define SXGBE_CSR_F_500M 500000000
36
37/* pause time */
38#define SXGBE_PAUSE_TIME 0x200
39
40/* tx queues */
41#define SXGBE_TX_QUEUES 8
42#define SXGBE_RX_QUEUES 16
43
44/* Calculated based on how much time it takes to fill 256KB Rx memory
45 * at 10Gb speed with a 156MHz clock rate; the value used is slightly
46 * less than the actual one.
47 */
48#define SXGBE_MAX_DMA_RIWT 0x70
49#define SXGBE_MIN_DMA_RIWT 0x01
50
51/* Tx coalesce parameters */
52#define SXGBE_COAL_TX_TIMER 40000
53#define SXGBE_MAX_COAL_TX_TICK 100000
54#define SXGBE_TX_MAX_FRAMES 512
55#define SXGBE_TX_FRAMES 128
56
57/* SXGBE TX FIFO is 8K, Rx FIFO is 16K */
58#define BUF_SIZE_16KiB 16384
59#define BUF_SIZE_8KiB 8192
60#define BUF_SIZE_4KiB 4096
61#define BUF_SIZE_2KiB 2048
62
63#define SXGBE_DEFAULT_LIT_LS 0x3E8
64#define SXGBE_DEFAULT_TWT_LS 0x0
65
66/* Flow Control defines */
67#define SXGBE_FLOW_OFF 0
68#define SXGBE_FLOW_RX 1
69#define SXGBE_FLOW_TX 2
70#define SXGBE_FLOW_AUTO (SXGBE_FLOW_TX | SXGBE_FLOW_RX)
71
72#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
73
74/* errors */
75#define RX_GMII_ERR 0x01
76#define RX_WATCHDOG_ERR 0x02
77#define RX_CRC_ERR 0x03
78#define RX_GIANT_ERR 0x04
79#define RX_IP_HDR_ERR 0x05
80#define RX_PAYLOAD_ERR 0x06
81#define RX_OVERFLOW_ERR 0x07
82
83/* pkt type */
84#define RX_LEN_PKT 0x00
85#define RX_MACCTL_PKT 0x01
86#define RX_DCBCTL_PKT 0x02
87#define RX_ARP_PKT 0x03
88#define RX_OAM_PKT 0x04
89#define RX_UNTAG_PKT 0x05
90#define RX_OTHER_PKT 0x07
91#define RX_SVLAN_PKT 0x08
92#define RX_CVLAN_PKT 0x09
93#define RX_DVLAN_OCVLAN_ICVLAN_PKT 0x0A
94#define RX_DVLAN_OSVLAN_ISVLAN_PKT 0x0B
95#define RX_DVLAN_OSVLAN_ICVLAN_PKT 0x0C
96#define RX_DVLAN_OCVLAN_ISVLAN_PKT 0x0D
97
98#define RX_NOT_IP_PKT 0x00
99#define RX_IPV4_TCP_PKT 0x01
100#define RX_IPV4_UDP_PKT 0x02
101#define RX_IPV4_ICMP_PKT 0x03
102#define RX_IPV4_UNKNOWN_PKT 0x07
103#define RX_IPV6_TCP_PKT 0x09
104#define RX_IPV6_UDP_PKT 0x0A
105#define RX_IPV6_ICMP_PKT 0x0B
106#define RX_IPV6_UNKNOWN_PKT 0x0F
107
108#define RX_NO_PTP 0x00
109#define RX_PTP_SYNC 0x01
110#define RX_PTP_FOLLOW_UP 0x02
111#define RX_PTP_DELAY_REQ 0x03
112#define RX_PTP_DELAY_RESP 0x04
113#define RX_PTP_PDELAY_REQ 0x05
114#define RX_PTP_PDELAY_RESP 0x06
115#define RX_PTP_PDELAY_FOLLOW_UP 0x07
116#define RX_PTP_ANNOUNCE 0x08
117#define RX_PTP_MGMT 0x09
118#define RX_PTP_SIGNAL 0x0A
119#define RX_PTP_RESV_MSG 0x0F
120
121enum dma_irq_status {
122 tx_hard_error = BIT(0),
123 tx_bump_tc = BIT(1),
124 handle_tx = BIT(2),
125 rx_hard_error = BIT(3),
126 rx_bump_tc = BIT(4),
127 handle_rx = BIT(5),
128};
129
130#define NETIF_F_HW_VLAN_ALL (NETIF_F_HW_VLAN_CTAG_RX | \
131 NETIF_F_HW_VLAN_STAG_RX | \
132 NETIF_F_HW_VLAN_CTAG_TX | \
133 NETIF_F_HW_VLAN_STAG_TX | \
134 NETIF_F_HW_VLAN_CTAG_FILTER | \
135 NETIF_F_HW_VLAN_STAG_FILTER)
136
137/* MMC control defines */
138#define SXGBE_MMC_CTRL_CNT_FRZ 0x00000008
139
140/* SXGBE HW ADDR regs */
141#define SXGBE_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
142 (reg * 8))
143#define SXGBE_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
144 (reg * 8))
145#define SXGBE_MAX_PERFECT_ADDRESSES 32 /* Maximum unicast perfect filtering */
146#define SXGBE_FRAME_FILTER 0x00000004 /* Frame Filter */
147
148/* SXGBE Frame Filter defines */
149#define SXGBE_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
150#define SXGBE_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
151#define SXGBE_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
152#define SXGBE_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
153#define SXGBE_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
154#define SXGBE_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
155#define SXGBE_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
156#define SXGBE_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
157#define SXGBE_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
158#define SXGBE_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
159
160#define SXGBE_HASH_TABLE_SIZE 64
161#define SXGBE_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
162#define SXGBE_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
163
164#define SXGBE_HI_REG_AE 0x80000000
165
166/* Minimum and maximum MTU */
167#define MIN_MTU 68
168#define MAX_MTU 9000
169
170#define SXGBE_FOR_EACH_QUEUE(max_queues, queue_num) \
171 for (queue_num = 0; queue_num < max_queues; queue_num++)
172
173/* sxgbe statistics counters */
174struct sxgbe_extra_stats {
175 /* TX/RX IRQ events */
176 unsigned long tx_underflow_irq;
177 unsigned long tx_process_stopped_irq;
178 unsigned long tx_ctxt_desc_err;
179 unsigned long tx_threshold;
180 unsigned long rx_threshold;
181 unsigned long tx_pkt_n;
182 unsigned long rx_pkt_n;
183 unsigned long normal_irq_n;
184 unsigned long tx_normal_irq_n;
185 unsigned long rx_normal_irq_n;
186 unsigned long napi_poll;
187 unsigned long tx_clean;
188 unsigned long tx_reset_ic_bit;
189 unsigned long rx_process_stopped_irq;
190 unsigned long rx_underflow_irq;
191
192 /* Bus access errors */
193 unsigned long fatal_bus_error_irq;
194 unsigned long tx_read_transfer_err;
195 unsigned long tx_write_transfer_err;
196 unsigned long tx_desc_access_err;
197 unsigned long tx_buffer_access_err;
198 unsigned long tx_data_transfer_err;
199 unsigned long rx_read_transfer_err;
200 unsigned long rx_write_transfer_err;
201 unsigned long rx_desc_access_err;
202 unsigned long rx_buffer_access_err;
203 unsigned long rx_data_transfer_err;
204
205 /* RX specific */
206 /* L2 error */
207 unsigned long rx_code_gmii_err;
208 unsigned long rx_watchdog_err;
209 unsigned long rx_crc_err;
210 unsigned long rx_giant_pkt_err;
211 unsigned long ip_hdr_err;
212 unsigned long ip_payload_err;
213 unsigned long overflow_error;
214
215 /* L2 Pkt type */
216 unsigned long len_pkt;
217 unsigned long mac_ctl_pkt;
218 unsigned long dcb_ctl_pkt;
219 unsigned long arp_pkt;
220 unsigned long oam_pkt;
221 unsigned long untag_pkt;
222 unsigned long other_pkt;
223 unsigned long svlan_tag_pkt;
224 unsigned long cvlan_tag_pkt;
225 unsigned long dvlan_ocvlan_icvlan_pkt;
226 unsigned long dvlan_osvlan_isvlan_pkt;
227 unsigned long dvlan_osvlan_icvlan_pkt;
228 unsigned long dvlan_ocvlan_isvlan_pkt;
229
230 /* L3/L4 Pkt type */
231 unsigned long not_ip_pkt;
232 unsigned long ip4_tcp_pkt;
233 unsigned long ip4_udp_pkt;
234 unsigned long ip4_icmp_pkt;
235 unsigned long ip4_unknown_pkt;
236 unsigned long ip6_tcp_pkt;
237 unsigned long ip6_udp_pkt;
238 unsigned long ip6_icmp_pkt;
239 unsigned long ip6_unknown_pkt;
240
241 /* Filter specific */
242 unsigned long vlan_filter_match;
243 unsigned long sa_filter_fail;
244 unsigned long da_filter_fail;
245 unsigned long hash_filter_pass;
246 unsigned long l3_filter_match;
247 unsigned long l4_filter_match;
248
249 /* RX context specific */
250 unsigned long timestamp_dropped;
251 unsigned long rx_msg_type_no_ptp;
252 unsigned long rx_ptp_type_sync;
253 unsigned long rx_ptp_type_follow_up;
254 unsigned long rx_ptp_type_delay_req;
255 unsigned long rx_ptp_type_delay_resp;
256 unsigned long rx_ptp_type_pdelay_req;
257 unsigned long rx_ptp_type_pdelay_resp;
258 unsigned long rx_ptp_type_pdelay_follow_up;
259 unsigned long rx_ptp_announce;
260 unsigned long rx_ptp_mgmt;
261 unsigned long rx_ptp_signal;
262 unsigned long rx_ptp_resv_msg_type;
263};
264
265struct mac_link {
266 int port;
267 int duplex;
268 int speed;
269};
270
271struct mii_regs {
272 unsigned int addr; /* MII Address */
273 unsigned int data; /* MII Data */
274};
275
276struct sxgbe_core_ops {
277 /* MAC core initialization */
278 void (*core_init)(void __iomem *ioaddr);
279 /* Dump MAC registers */
280 void (*dump_regs)(void __iomem *ioaddr);
281 /* Handle extra events on specific interrupts hw dependent */
282 int (*host_irq_status)(void __iomem *ioaddr,
283 struct sxgbe_extra_stats *x);
284 /* Set power management mode (e.g. magic frame) */
285 void (*pmt)(void __iomem *ioaddr, unsigned long mode);
286 /* Set/Get Unicast MAC addresses */
287 void (*set_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
288 unsigned int reg_n);
289 void (*get_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
290 unsigned int reg_n);
291 void (*enable_rx)(void __iomem *ioaddr, bool enable);
292 void (*enable_tx)(void __iomem *ioaddr, bool enable);
293
294 /* controller version specific operations */
295 int (*get_controller_version)(void __iomem *ioaddr);
296
297 /* If supported then get the optional core features */
298 unsigned int (*get_hw_feature)(void __iomem *ioaddr,
299 unsigned char feature_index);
300 /* adjust SXGBE speed */
301 void (*set_speed)(void __iomem *ioaddr, unsigned char speed);
302};
303
304const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
305
306struct sxgbe_ops {
307 const struct sxgbe_core_ops *mac;
308 const struct sxgbe_desc_ops *desc;
309 const struct sxgbe_dma_ops *dma;
310 const struct sxgbe_mtl_ops *mtl;
311 struct mii_regs mii; /* MII register Addresses */
312 struct mac_link link;
313 unsigned int ctrl_uid;
314 unsigned int ctrl_id;
315};
316
317/* SXGBE private data structures */
318struct sxgbe_tx_queue {
319 unsigned int irq_no;
320 struct sxgbe_priv_data *priv_ptr;
321 struct sxgbe_tx_norm_desc *dma_tx;
322 dma_addr_t dma_tx_phy;
323 dma_addr_t *tx_skbuff_dma;
324 struct sk_buff **tx_skbuff;
325 struct timer_list txtimer;
326 spinlock_t tx_lock; /* lock for tx queues */
327 unsigned int cur_tx;
328 unsigned int dirty_tx;
329 u32 tx_count_frames;
330 u32 tx_coal_frames;
331 u32 tx_coal_timer;
332 int hwts_tx_en;
333 u8 queue_no;
334};
335
336struct sxgbe_rx_queue {
337 struct sxgbe_priv_data *priv_ptr;
338 struct sxgbe_rx_norm_desc *dma_rx;
339 struct sk_buff **rx_skbuff;
340 unsigned int cur_rx;
341 unsigned int dirty_rx;
342 unsigned int irq_no;
343 u32 rx_riwt;
344 dma_addr_t *rx_skbuff_dma;
345 dma_addr_t dma_rx_phy;
346 u8 queue_no;
347};
348
349/* SXGBE HW capabilities */
350struct sxgbe_hw_features {
351 /****** CAP [0] *******/
352 unsigned int pmt_remote_wake_up;
353 unsigned int pmt_magic_frame;
354 /* IEEE 1588-2008 */
355 unsigned int atime_stamp;
356
357 unsigned int tx_csum_offload;
358 unsigned int rx_csum_offload;
359 unsigned int multi_macaddr;
360 unsigned int tstamp_srcselect;
361 unsigned int sa_vlan_insert;
362
363 /****** CAP [1] *******/
364 unsigned int rxfifo_size;
365 unsigned int txfifo_size;
366 unsigned int atstmap_hword;
367 unsigned int dcb_enable;
368 unsigned int splithead_enable;
369 unsigned int tcpseg_offload;
370 unsigned int debug_mem;
371 unsigned int rss_enable;
372 unsigned int hash_tsize;
373 unsigned int l3l4_filer_size;
374
375 /* These values are in bytes, as
376 * specified in the HW features
377 * section of the SXGBE databook
378 */
379 unsigned int rx_mtl_qsize;
380 unsigned int tx_mtl_qsize;
381
382 /****** CAP [2] *******/
383 /* TX and RX number of channels */
384 unsigned int rx_mtl_queues;
385 unsigned int tx_mtl_queues;
386 unsigned int rx_dma_channels;
387 unsigned int tx_dma_channels;
388 unsigned int pps_output_count;
389 unsigned int aux_input_count;
390};
391
392struct sxgbe_priv_data {
393 /* DMA descriptors */
394 struct sxgbe_tx_queue *txq[SXGBE_TX_QUEUES];
395 struct sxgbe_rx_queue *rxq[SXGBE_RX_QUEUES];
396 u8 cur_rx_qnum;
397
398 unsigned int dma_tx_size;
399 unsigned int dma_rx_size;
400 unsigned int dma_buf_sz;
401 u32 rx_riwt;
402
403 struct napi_struct napi;
404
405 void __iomem *ioaddr;
406 struct net_device *dev;
407 struct device *device;
408 struct sxgbe_ops *hw; /* sxgbe specific ops */
409 int no_csum_insertion;
410 int irq;
411 spinlock_t stats_lock; /* lock for tx/rx statistics */
412
413 struct phy_device *phydev;
414 int oldlink;
415 int speed;
416 int oldduplex;
417 struct mii_bus *mii;
418 int mii_irq[PHY_MAX_ADDR];
419 u8 rx_pause;
420 u8 tx_pause;
421
422 struct sxgbe_extra_stats xstats;
423 struct sxgbe_plat_data *plat;
424 struct sxgbe_hw_features hw_cap;
425
426 u32 msg_enable;
427
428 struct clk *sxgbe_clk;
429 int clk_csr;
430 unsigned int mode;
431 unsigned int default_addend;
432
433 /* advanced time stamp support */
434 u32 adv_ts;
435 int use_riwt;
436
437 /* tc control */
438 int tx_tc;
439 int rx_tc;
440};
441
442/* Function prototypes */
443struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
444 struct sxgbe_plat_data *plat_dat,
445 void __iomem *addr);
446int sxgbe_drv_remove(struct net_device *ndev);
447void sxgbe_set_ethtool_ops(struct net_device *netdev);
448int sxgbe_mdio_unregister(struct net_device *ndev);
449int sxgbe_mdio_register(struct net_device *ndev);
450int sxgbe_register_platform(void);
451void sxgbe_unregister_platform(void);
452
453#ifdef CONFIG_PM
454int sxgbe_suspend(struct net_device *ndev);
455int sxgbe_resume(struct net_device *ndev);
456int sxgbe_freeze(struct net_device *ndev);
457int sxgbe_restore(struct net_device *ndev);
458#endif /* CONFIG_PM */
459
460const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void);
461
462#endif /* __SXGBE_COMMON_H__ */
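
The SXGBE_ADDR_HIGH/SXGBE_ADDR_LOW macros above map address-filter slot N to a pair of register offsets: slots 0-15 live in a block starting at 0x40, slots 16 and up in a second block starting at 0x800. A minimal user-space sketch of the arithmetic (the two macros are copied verbatim from this header; nothing else is assumed):

	#include <stdio.h>

	#define SXGBE_ADDR_HIGH(reg)	(((reg > 15) ? 0x00000800 : 0x00000040) + \
					 (reg * 8))
	#define SXGBE_ADDR_LOW(reg)	(((reg > 15) ? 0x00000804 : 0x00000044) + \
					 (reg * 8))

	int main(void)
	{
		/* slot 0 (primary MAC address) -> 0x040/0x044,
		 * slot 1 -> 0x048/0x04c, slot 16 -> 0x880/0x884
		 */
		printf("0:  0x%03x/0x%03x\n", SXGBE_ADDR_HIGH(0), SXGBE_ADDR_LOW(0));
		printf("1:  0x%03x/0x%03x\n", SXGBE_ADDR_HIGH(1), SXGBE_ADDR_LOW(1));
		printf("16: 0x%03x/0x%03x\n", SXGBE_ADDR_HIGH(16), SXGBE_ADDR_LOW(16));
		return 0;
	}

Note that the macro argument `reg` is not parenthesized in the definitions, so callers should pass only plain variables or constants; an expression such as `i + 1` would expand incorrectly.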
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
new file mode 100644
index 000000000000..4ad31bbc42c9
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
@@ -0,0 +1,158 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/export.h>
16#include <linux/io.h>
17#include <linux/netdevice.h>
18#include <linux/phy.h>
19
20#include "sxgbe_common.h"
21#include "sxgbe_reg.h"
22
23/* MAC core initialization */
24static void sxgbe_core_init(void __iomem *ioaddr)
25{
26 u32 regval;
27
28 /* TX configuration */
29 regval = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
30 /* Other configurable parameters IFP, IPG, ISR, ISM
31 * needs to be set if needed
32 */
33 regval |= SXGBE_TX_JABBER_DISABLE;
34 writel(regval, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
35
36 /* RX configuration */
37 regval = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
38 /* Other configurable parameters CST, SPEN, USP, GPSLCE
39 * WD, LM, S2KP, HDSMS, GPSL, ELEN, ARPEN needs to be
40 * set if needed
41 */
42 regval |= SXGBE_RX_JUMBPKT_ENABLE | SXGBE_RX_ACS_ENABLE;
43 writel(regval, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
44}
45
46/* Dump MAC registers */
47static void sxgbe_core_dump_regs(void __iomem *ioaddr)
48{
49}
50
51/* Handle extra events on specific interrupts hw dependent */
52static int sxgbe_core_host_irq_status(void __iomem *ioaddr,
53 struct sxgbe_extra_stats *x)
54{
55 return 0;
56}
57
58/* Set power management mode (e.g. magic frame) */
59static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode)
60{
61}
62
63/* Set/Get Unicast MAC addresses */
64static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
65 unsigned int reg_n)
66{
67 u32 high_word, low_word;
68
69 high_word = (addr[5] << 8) | (addr[4]);
70 low_word = ((addr[3] << 24) | (addr[2] << 16) |
71 (addr[1] << 8) | (addr[0]));
72 writel(high_word, ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
73 writel(low_word, ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));
74}
75
76static void sxgbe_core_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
77 unsigned int reg_n)
78{
79 u32 high_word, low_word;
80
81 high_word = readl(ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
82 low_word = readl(ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));
83
84 /* extract and assign address */
85 addr[5] = (high_word & 0x0000FF00) >> 8;
86 addr[4] = (high_word & 0x000000FF);
87 addr[3] = (low_word & 0xFF000000) >> 24;
88 addr[2] = (low_word & 0x00FF0000) >> 16;
89 addr[1] = (low_word & 0x0000FF00) >> 8;
90 addr[0] = (low_word & 0x000000FF);
91}
92
93static void sxgbe_enable_tx(void __iomem *ioaddr, bool enable)
94{
95 u32 tx_config;
96
97 tx_config = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
98 tx_config &= ~SXGBE_TX_ENABLE;
99
100 if (enable)
101 tx_config |= SXGBE_TX_ENABLE;
102 writel(tx_config, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
103}
104
105static void sxgbe_enable_rx(void __iomem *ioaddr, bool enable)
106{
107 u32 rx_config;
108
109 rx_config = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
110 rx_config &= ~SXGBE_RX_ENABLE;
111
112 if (enable)
113 rx_config |= SXGBE_RX_ENABLE;
114 writel(rx_config, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
115}
116
117static int sxgbe_get_controller_version(void __iomem *ioaddr)
118{
119 return readl(ioaddr + SXGBE_CORE_VERSION_REG);
120}
121
122/* If supported then get the optional core features */
123static unsigned int sxgbe_get_hw_feature(void __iomem *ioaddr,
124 unsigned char feature_index)
125{
126 return readl(ioaddr + (SXGBE_CORE_HW_FEA_REG(feature_index)));
127}
128
129static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
130{
131 u32 tx_cfg = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
132
133 /* clear the speed bits */
134 tx_cfg &= ~0x60000000;
135 tx_cfg |= (speed << SXGBE_SPEED_LSHIFT);
136
137 /* set the speed */
138 writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
139}
140
141const struct sxgbe_core_ops core_ops = {
142 .core_init = sxgbe_core_init,
143 .dump_regs = sxgbe_core_dump_regs,
144 .host_irq_status = sxgbe_core_host_irq_status,
145 .pmt = sxgbe_core_pmt,
146 .set_umac_addr = sxgbe_core_set_umac_addr,
147 .get_umac_addr = sxgbe_core_get_umac_addr,
148 .enable_rx = sxgbe_enable_rx,
149 .enable_tx = sxgbe_enable_tx,
150 .get_controller_version = sxgbe_get_controller_version,
151 .get_hw_feature = sxgbe_get_hw_feature,
152 .set_speed = sxgbe_core_set_speed,
153};
154
155const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
156{
157 return &core_ops;
158}
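
sxgbe_core_set_umac_addr() packs the six MAC address bytes into two registers, low-order bytes first. The bitwise OR is essential here: a logical `||` would collapse every term to 0 or 1. A standalone sketch of the same packing, with made-up example bytes (not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t addr[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };

		/* same layout as sxgbe_core_set_umac_addr() */
		uint32_t high = (addr[5] << 8) | addr[4];
		uint32_t low  = ((uint32_t)addr[3] << 24) | (addr[2] << 16) |
				(addr[1] << 8) | addr[0];

		/* prints high=0x00005e4d low=0x3c2b1a00 */
		printf("high=0x%08x low=0x%08x\n", high, low);
		return 0;
	}

sxgbe_core_get_umac_addr() is the exact inverse of this layout, so a set followed by a get returns the original six bytes.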
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
new file mode 100644
index 000000000000..7cb5520475b7
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
@@ -0,0 +1,515 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/bitops.h>
16#include <linux/export.h>
17#include <linux/io.h>
18#include <linux/netdevice.h>
19#include <linux/phy.h>
20
21#include "sxgbe_common.h"
22#include "sxgbe_dma.h"
23#include "sxgbe_desc.h"
24
25/* DMA TX descriptor ring initialization */
26static void sxgbe_init_tx_desc(struct sxgbe_tx_norm_desc *p)
27{
28 p->tdes23.tx_rd_des23.own_bit = 0;
29}
30
31static void sxgbe_tx_desc_enable_tse(struct sxgbe_tx_norm_desc *p, u8 is_tse,
32 u32 total_hdr_len, u32 tcp_hdr_len,
33 u32 tcp_payload_len)
34{
35 p->tdes23.tx_rd_des23.tse_bit = is_tse;
36 p->tdes23.tx_rd_des23.buf1_size = total_hdr_len;
37 p->tdes23.tx_rd_des23.tcp_hdr_len = tcp_hdr_len / 4;
38 p->tdes23.tx_rd_des23.tx_pkt_len.tcp_payload_len = tcp_payload_len;
39}
40
41/* Assign buffer lengths for descriptor */
42static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
43 int buf1_len, int pkt_len, int cksum)
44{
45 p->tdes23.tx_rd_des23.first_desc = is_fd;
46 p->tdes23.tx_rd_des23.buf1_size = buf1_len;
47
48 p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len;
49
50 if (cksum)
51 p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full;
52}
53
54/* Set VLAN control information */
55static void sxgbe_tx_vlanctl_desc(struct sxgbe_tx_norm_desc *p, int vlan_ctl)
56{
57 p->tdes23.tx_rd_des23.vlan_tag_ctl = vlan_ctl;
58}
59
60/* Set the owner of Normal descriptor */
61static void sxgbe_set_tx_owner(struct sxgbe_tx_norm_desc *p)
62{
63 p->tdes23.tx_rd_des23.own_bit = 1;
64}
65
66/* Get the owner of Normal descriptor */
67static int sxgbe_get_tx_owner(struct sxgbe_tx_norm_desc *p)
68{
69 return p->tdes23.tx_rd_des23.own_bit;
70}
71
72/* Invoked by the xmit function to close the tx descriptor */
73static void sxgbe_close_tx_desc(struct sxgbe_tx_norm_desc *p)
74{
75 p->tdes23.tx_rd_des23.last_desc = 1;
76 p->tdes23.tx_rd_des23.int_on_com = 1;
77}
78
79/* Clean the tx descriptor as soon as the tx irq is received */
80static void sxgbe_release_tx_desc(struct sxgbe_tx_norm_desc *p)
81{
82 memset(p, 0, sizeof(*p));
83}
84
85/* Clear interrupt on tx frame completion. When this bit is
86 * set an interrupt happens as soon as the frame is transmitted
87 */
88static void sxgbe_clear_tx_ic(struct sxgbe_tx_norm_desc *p)
89{
90 p->tdes23.tx_rd_des23.int_on_com = 0;
91}
92
93/* Last tx segment reports the transmit status */
94static int sxgbe_get_tx_ls(struct sxgbe_tx_norm_desc *p)
95{
96 return p->tdes23.tx_rd_des23.last_desc;
97}
98
99/* Get the buffer size from the descriptor */
100static int sxgbe_get_tx_len(struct sxgbe_tx_norm_desc *p)
101{
102 return p->tdes23.tx_rd_des23.buf1_size;
103}
104
105/* Set tx timestamp enable bit */
106static void sxgbe_tx_enable_tstamp(struct sxgbe_tx_norm_desc *p)
107{
108 p->tdes23.tx_rd_des23.timestmp_enable = 1;
109}
110
111/* get tx timestamp status */
112static int sxgbe_get_tx_timestamp_status(struct sxgbe_tx_norm_desc *p)
113{
114 return p->tdes23.tx_rd_des23.timestmp_enable;
115}
116
117/* TX Context Descriptor Specific */
118static void sxgbe_tx_ctxt_desc_set_ctxt(struct sxgbe_tx_ctxt_desc *p)
119{
120 p->ctxt_bit = 1;
121}
122
123/* Set the owner of TX context descriptor */
124static void sxgbe_tx_ctxt_desc_set_owner(struct sxgbe_tx_ctxt_desc *p)
125{
126 p->own_bit = 1;
127}
128
129/* Get the owner of TX context descriptor */
130static int sxgbe_tx_ctxt_desc_get_owner(struct sxgbe_tx_ctxt_desc *p)
131{
132 return p->own_bit;
133}
134
135/* Set TX mss in TX context Descriptor */
136static void sxgbe_tx_ctxt_desc_set_mss(struct sxgbe_tx_ctxt_desc *p, int mss)
137{
138 p->maxseg_size = mss;
139}
140
141/* Get TX mss from TX context Descriptor */
142static int sxgbe_tx_ctxt_desc_get_mss(struct sxgbe_tx_ctxt_desc *p)
143{
144 return p->maxseg_size;
145}
146
147/* Set TX tcmssv in TX context Descriptor */
148static void sxgbe_tx_ctxt_desc_set_tcmssv(struct sxgbe_tx_ctxt_desc *p)
149{
150 p->tcmssv = 1;
151}
152
153/* Reset TX ostc in TX context Descriptor */
154static void sxgbe_tx_ctxt_desc_reset_ostc(struct sxgbe_tx_ctxt_desc *p)
155{
156 p->ostc = 0;
157}
158
159/* Set IVLAN information */
160static void sxgbe_tx_ctxt_desc_set_ivlantag(struct sxgbe_tx_ctxt_desc *p,
161 int is_ivlanvalid, int ivlan_tag,
162 int ivlan_ctl)
163{
164 if (is_ivlanvalid) {
165 p->ivlan_tag_valid = is_ivlanvalid;
166 p->ivlan_tag = ivlan_tag;
167 p->ivlan_tag_ctl = ivlan_ctl;
168 }
169}
170
171/* Return IVLAN Tag */
172static int sxgbe_tx_ctxt_desc_get_ivlantag(struct sxgbe_tx_ctxt_desc *p)
173{
174 return p->ivlan_tag;
175}
176
177/* Set VLAN Tag */
178static void sxgbe_tx_ctxt_desc_set_vlantag(struct sxgbe_tx_ctxt_desc *p,
179 int is_vlanvalid, int vlan_tag)
180{
181 if (is_vlanvalid) {
182 p->vltag_valid = is_vlanvalid;
183 p->vlan_tag = vlan_tag;
184 }
185}
186
187/* Return VLAN Tag */
188static int sxgbe_tx_ctxt_desc_get_vlantag(struct sxgbe_tx_ctxt_desc *p)
189{
190 return p->vlan_tag;
191}
192
193/* Set Time stamp */
194static void sxgbe_tx_ctxt_desc_set_tstamp(struct sxgbe_tx_ctxt_desc *p,
195 u8 ostc_enable, u64 tstamp)
196{
197 if (ostc_enable) {
198 p->ostc = ostc_enable;
199 p->tstamp_lo = (u32)tstamp;
200 p->tstamp_hi = (u32)(tstamp >> 32);
201 }
202}
203/* Close TX context descriptor */
204static void sxgbe_tx_ctxt_desc_close(struct sxgbe_tx_ctxt_desc *p)
205{
206 p->own_bit = 1;
207}
208
209/* WB status of context descriptor */
210static int sxgbe_tx_ctxt_desc_get_cde(struct sxgbe_tx_ctxt_desc *p)
211{
212 return p->ctxt_desc_err;
213}
214
215/* DMA RX descriptor ring initialization */
216static void sxgbe_init_rx_desc(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
217 int mode, int end)
218{
219 p->rdes23.rx_rd_des23.own_bit = 1;
220 if (disable_rx_ic)
221 p->rdes23.rx_rd_des23.int_on_com = disable_rx_ic;
222}
223
224/* Get RX own bit */
225static int sxgbe_get_rx_owner(struct sxgbe_rx_norm_desc *p)
226{
227 return p->rdes23.rx_rd_des23.own_bit;
228}
229
230/* Set RX own bit */
231static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
232{
233 p->rdes23.rx_rd_des23.own_bit = 1;
234}
235
236/* Get the receive frame size */
237static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
238{
239 return p->rdes23.rx_wb_des23.pkt_len;
240}
241
242/* Return first Descriptor status */
243static int sxgbe_get_rx_fd_status(struct sxgbe_rx_norm_desc *p)
244{
245 return p->rdes23.rx_wb_des23.first_desc;
246}
247
248/* Return Last Descriptor status */
249static int sxgbe_get_rx_ld_status(struct sxgbe_rx_norm_desc *p)
250{
251 return p->rdes23.rx_wb_des23.last_desc;
252}
253
254
255/* Return the RX status looking at the WB fields */
256static int sxgbe_rx_wbstatus(struct sxgbe_rx_norm_desc *p,
257 struct sxgbe_extra_stats *x, int *checksum)
258{
259 int status = 0;
260
261 *checksum = CHECKSUM_UNNECESSARY;
262 if (p->rdes23.rx_wb_des23.err_summary) {
263 switch (p->rdes23.rx_wb_des23.err_l2_type) {
264 case RX_GMII_ERR:
265 status = -EINVAL;
266 x->rx_code_gmii_err++;
267 break;
268 case RX_WATCHDOG_ERR:
269 status = -EINVAL;
270 x->rx_watchdog_err++;
271 break;
272 case RX_CRC_ERR:
273 status = -EINVAL;
274 x->rx_crc_err++;
275 break;
276 case RX_GIANT_ERR:
277 status = -EINVAL;
278 x->rx_giant_pkt_err++;
279 break;
280 case RX_IP_HDR_ERR:
281 *checksum = CHECKSUM_NONE;
282 x->ip_hdr_err++;
283 break;
284 case RX_PAYLOAD_ERR:
285 *checksum = CHECKSUM_NONE;
286 x->ip_payload_err++;
287 break;
288 case RX_OVERFLOW_ERR:
289 status = -EINVAL;
290 x->overflow_error++;
291 break;
292 default:
293 pr_err("Invalid Error type\n");
294 break;
295 }
296 } else {
297 switch (p->rdes23.rx_wb_des23.err_l2_type) {
298 case RX_LEN_PKT:
299 x->len_pkt++;
300 break;
301 case RX_MACCTL_PKT:
302 x->mac_ctl_pkt++;
303 break;
304 case RX_DCBCTL_PKT:
305 x->dcb_ctl_pkt++;
306 break;
307 case RX_ARP_PKT:
308 x->arp_pkt++;
309 break;
310 case RX_OAM_PKT:
311 x->oam_pkt++;
312 break;
313 case RX_UNTAG_PKT:
314 x->untag_pkt++;
315 break;
316 case RX_OTHER_PKT:
317 x->other_pkt++;
318 break;
319 case RX_SVLAN_PKT:
320 x->svlan_tag_pkt++;
321 break;
322 case RX_CVLAN_PKT:
323 x->cvlan_tag_pkt++;
324 break;
325 case RX_DVLAN_OCVLAN_ICVLAN_PKT:
326 x->dvlan_ocvlan_icvlan_pkt++;
327 break;
328 case RX_DVLAN_OSVLAN_ISVLAN_PKT:
329 x->dvlan_osvlan_isvlan_pkt++;
330 break;
331 case RX_DVLAN_OSVLAN_ICVLAN_PKT:
332 x->dvlan_osvlan_icvlan_pkt++;
333 break;
334 case RX_DVLAN_OCVLAN_ISVLAN_PKT:
335 x->dvlan_ocvlan_isvlan_pkt++;
336 break;
337 default:
338 pr_err("Invalid L2 Packet type\n");
339 break;
340 }
341 }
342
343 /* L3/L4 Pkt type */
344 switch (p->rdes23.rx_wb_des23.layer34_pkt_type) {
345 case RX_NOT_IP_PKT:
346 x->not_ip_pkt++;
347 break;
348 case RX_IPV4_TCP_PKT:
349 x->ip4_tcp_pkt++;
350 break;
351 case RX_IPV4_UDP_PKT:
352 x->ip4_udp_pkt++;
353 break;
354 case RX_IPV4_ICMP_PKT:
355 x->ip4_icmp_pkt++;
356 break;
357 case RX_IPV4_UNKNOWN_PKT:
358 x->ip4_unknown_pkt++;
359 break;
360 case RX_IPV6_TCP_PKT:
361 x->ip6_tcp_pkt++;
362 break;
363 case RX_IPV6_UDP_PKT:
364 x->ip6_udp_pkt++;
365 break;
366 case RX_IPV6_ICMP_PKT:
367 x->ip6_icmp_pkt++;
368 break;
369 case RX_IPV6_UNKNOWN_PKT:
370 x->ip6_unknown_pkt++;
371 break;
372 default:
373 pr_err("Invalid L3/L4 Packet type\n");
374 break;
375 }
376
377 /* Filter */
378 if (p->rdes23.rx_wb_des23.vlan_filter_match)
379 x->vlan_filter_match++;
380
381 if (p->rdes23.rx_wb_des23.sa_filter_fail) {
382 status = -EINVAL;
383 x->sa_filter_fail++;
384 }
385 if (p->rdes23.rx_wb_des23.da_filter_fail) {
386 status = -EINVAL;
387 x->da_filter_fail++;
388 }
389 if (p->rdes23.rx_wb_des23.hash_filter_pass)
390 x->hash_filter_pass++;
391
392 if (p->rdes23.rx_wb_des23.l3_filter_match)
393 x->l3_filter_match++;
394
395 if (p->rdes23.rx_wb_des23.l4_filter_match)
396 x->l4_filter_match++;
397
398 return status;
399}
400
401/* Get own bit of context descriptor */
402static int sxgbe_get_rx_ctxt_owner(struct sxgbe_rx_ctxt_desc *p)
403{
404 return p->own_bit;
405}
406
407/* Set own bit for context descriptor */
408static void sxgbe_set_ctxt_rx_owner(struct sxgbe_rx_ctxt_desc *p)
409{
410 p->own_bit = 1;
411}
412
413
414/* Return the reception status looking at Context control information */
415static void sxgbe_rx_ctxt_wbstatus(struct sxgbe_rx_ctxt_desc *p,
416 struct sxgbe_extra_stats *x)
417{
418 if (p->tstamp_dropped)
419 x->timestamp_dropped++;
420
421 /* ptp */
422 if (p->ptp_msgtype == RX_NO_PTP)
423 x->rx_msg_type_no_ptp++;
424 else if (p->ptp_msgtype == RX_PTP_SYNC)
425 x->rx_ptp_type_sync++;
426 else if (p->ptp_msgtype == RX_PTP_FOLLOW_UP)
427 x->rx_ptp_type_follow_up++;
428 else if (p->ptp_msgtype == RX_PTP_DELAY_REQ)
429 x->rx_ptp_type_delay_req++;
430 else if (p->ptp_msgtype == RX_PTP_DELAY_RESP)
431 x->rx_ptp_type_delay_resp++;
432 else if (p->ptp_msgtype == RX_PTP_PDELAY_REQ)
433 x->rx_ptp_type_pdelay_req++;
434 else if (p->ptp_msgtype == RX_PTP_PDELAY_RESP)
435 x->rx_ptp_type_pdelay_resp++;
436 else if (p->ptp_msgtype == RX_PTP_PDELAY_FOLLOW_UP)
437 x->rx_ptp_type_pdelay_follow_up++;
438 else if (p->ptp_msgtype == RX_PTP_ANNOUNCE)
439 x->rx_ptp_announce++;
440 else if (p->ptp_msgtype == RX_PTP_MGMT)
441 x->rx_ptp_mgmt++;
442 else if (p->ptp_msgtype == RX_PTP_SIGNAL)
443 x->rx_ptp_signal++;
444 else if (p->ptp_msgtype == RX_PTP_RESV_MSG)
445 x->rx_ptp_resv_msg_type++;
446}
447
448/* Get rx timestamp status */
449static int sxgbe_get_rx_ctxt_tstamp_status(struct sxgbe_rx_ctxt_desc *p)
450{
451 if ((p->tstamp_hi == 0xffffffff) && (p->tstamp_lo == 0xffffffff)) {
452 pr_err("Time stamp corrupted\n");
453 return 0;
454 }
455
456 return p->tstamp_available;
457}
458
459
460static u64 sxgbe_get_rx_timestamp(struct sxgbe_rx_ctxt_desc *p)
461{
462 u64 ns;
463
464 ns = p->tstamp_lo;
465 ns |= ((u64)p->tstamp_hi) << 32;
466
467 return ns;
468}
469
470static const struct sxgbe_desc_ops desc_ops = {
471 .init_tx_desc = sxgbe_init_tx_desc,
472 .tx_desc_enable_tse = sxgbe_tx_desc_enable_tse,
473 .prepare_tx_desc = sxgbe_prepare_tx_desc,
474 .tx_vlanctl_desc = sxgbe_tx_vlanctl_desc,
475 .set_tx_owner = sxgbe_set_tx_owner,
476 .get_tx_owner = sxgbe_get_tx_owner,
477 .close_tx_desc = sxgbe_close_tx_desc,
478 .release_tx_desc = sxgbe_release_tx_desc,
479 .clear_tx_ic = sxgbe_clear_tx_ic,
480 .get_tx_ls = sxgbe_get_tx_ls,
481 .get_tx_len = sxgbe_get_tx_len,
482 .tx_enable_tstamp = sxgbe_tx_enable_tstamp,
483 .get_tx_timestamp_status = sxgbe_get_tx_timestamp_status,
484 .tx_ctxt_desc_set_ctxt = sxgbe_tx_ctxt_desc_set_ctxt,
485 .tx_ctxt_desc_set_owner = sxgbe_tx_ctxt_desc_set_owner,
486 .get_tx_ctxt_owner = sxgbe_tx_ctxt_desc_get_owner,
487 .tx_ctxt_desc_set_mss = sxgbe_tx_ctxt_desc_set_mss,
488 .tx_ctxt_desc_get_mss = sxgbe_tx_ctxt_desc_get_mss,
489 .tx_ctxt_desc_set_tcmssv = sxgbe_tx_ctxt_desc_set_tcmssv,
490 .tx_ctxt_desc_reset_ostc = sxgbe_tx_ctxt_desc_reset_ostc,
491 .tx_ctxt_desc_set_ivlantag = sxgbe_tx_ctxt_desc_set_ivlantag,
492 .tx_ctxt_desc_get_ivlantag = sxgbe_tx_ctxt_desc_get_ivlantag,
493 .tx_ctxt_desc_set_vlantag = sxgbe_tx_ctxt_desc_set_vlantag,
494 .tx_ctxt_desc_get_vlantag = sxgbe_tx_ctxt_desc_get_vlantag,
495 .tx_ctxt_set_tstamp = sxgbe_tx_ctxt_desc_set_tstamp,
496 .close_tx_ctxt_desc = sxgbe_tx_ctxt_desc_close,
497 .get_tx_ctxt_cde = sxgbe_tx_ctxt_desc_get_cde,
498 .init_rx_desc = sxgbe_init_rx_desc,
499 .get_rx_owner = sxgbe_get_rx_owner,
500 .set_rx_owner = sxgbe_set_rx_owner,
501 .get_rx_frame_len = sxgbe_get_rx_frame_len,
502 .get_rx_fd_status = sxgbe_get_rx_fd_status,
503 .get_rx_ld_status = sxgbe_get_rx_ld_status,
504 .rx_wbstatus = sxgbe_rx_wbstatus,
505 .get_rx_ctxt_owner = sxgbe_get_rx_ctxt_owner,
506 .set_rx_ctxt_owner = sxgbe_set_ctxt_rx_owner,
507 .rx_ctxt_wbstatus = sxgbe_rx_ctxt_wbstatus,
508 .get_rx_ctxt_tstamp_status = sxgbe_get_rx_ctxt_tstamp_status,
509 .get_timestamp = sxgbe_get_rx_timestamp,
510};
511
512const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void)
513{
514 return &desc_ops;
515}
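
Taken together, the TX ops above imply a fill order for the hot path: write the buffer address, sizes and checksum control first, mark the last segment, and flip the own bit only after everything else is in place, otherwise the DMA engine could fetch a half-written descriptor. A hypothetical xmit-path fragment (sketch only; example_fill_tx_desc() is not part of the patch, and DMA mapping and ring accounting are omitted):

	static void example_fill_tx_desc(struct sxgbe_tx_norm_desc *p,
					 dma_addr_t buf, int len)
	{
		const struct sxgbe_desc_ops *ops = sxgbe_get_desc_ops();

		p->tdes01 = buf;			 /* buffer 1 address */
		ops->prepare_tx_desc(p, 1, len, len, 1); /* first desc, csum on */
		ops->close_tx_desc(p);			 /* last desc + completion IRQ */
		ops->set_tx_owner(p);			 /* hand to the DMA last */
	}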
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
new file mode 100644
index 000000000000..2caef1ae1ac5
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
@@ -0,0 +1,298 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_DESC_H__
13#define __SXGBE_DESC_H__
14
15#define SXGBE_DESC_SIZE_BYTES 16
16
17/* forward declaration */
18struct sxgbe_extra_stats;
19
20/* Transmit checksum insertion control */
21enum tdes_csum_insertion {
22 cic_disabled = 0, /* checksum insertion disabled */
23 cic_only_ip = 1, /* Only IP header */
24 /* IP header but pseudoheader is not calculated */
25 cic_no_pseudoheader = 2,
26 cic_full = 3, /* IP header and pseudoheader */
27};
28
29struct sxgbe_tx_norm_desc {
30 u64 tdes01; /* buf1 address */
31 union {
32 /* TX Read-Format Desc 2,3 */
33 struct {
34 /* TDES2 */
35 u32 buf1_size:14;
36 u32 vlan_tag_ctl:2;
37 u32 buf2_size:14;
38 u32 timestmp_enable:1;
39 u32 int_on_com:1;
40 /* TDES3 */
41 union {
42 u32 tcp_payload_len:18;
43 struct {
44 u32 total_pkt_len:15;
45 u32 reserved1:1;
46 u32 cksum_ctl:2;
47 } cksum_pktlen;
48 } tx_pkt_len;
49
50 u32 tse_bit:1;
51 u32 tcp_hdr_len:4;
52 u32 sa_insert_ctl:3;
53 u32 crc_pad_ctl:2;
54 u32 last_desc:1;
55 u32 first_desc:1;
56 u32 ctxt_bit:1;
57 u32 own_bit:1;
58 } tx_rd_des23;
59
60 /* tx write back Desc 2,3 */
61 struct {
62 /* WB TES2 */
63 u32 reserved1;
64 /* WB TES3 */
65 u32 reserved2:31;
66 u32 own_bit:1;
67 } tx_wb_des23;
68 } tdes23;
69};
70
71struct sxgbe_rx_norm_desc {
72 union {
73 u32 rdes0; /* buf1 address */
74 struct {
75 u32 out_vlan_tag:16;
76 u32 in_vlan_tag:16;
77 } wb_rx_des0;
78 } rd_wb_des0;
79
80 union {
81 u32 rdes1; /* buf2 address or buf1[63:32] */
82 u32 rss_hash; /* Write-back RX */
83 } rd_wb_des1;
84
85 union {
86 /* RX Read format Desc 2,3 */
87 struct{
88 /* RDES2 */
89 u32 buf2_addr;
90 /* RDES3 */
91 u32 buf2_hi_addr:30;
92 u32 int_on_com:1;
93 u32 own_bit:1;
94 } rx_rd_des23;
95
96 /* RX write back */
97 struct{
98 /* WB RDES2 */
99 u32 hdr_len:10;
100 u32 rdes2_reserved:2;
101 u32 elrd_val:1;
102 u32 iovt_sel:1;
103 u32 res_pkt:1;
104 u32 vlan_filter_match:1;
105 u32 sa_filter_fail:1;
106 u32 da_filter_fail:1;
107 u32 hash_filter_pass:1;
108 u32 macaddr_filter_match:8;
109 u32 l3_filter_match:1;
110 u32 l4_filter_match:1;
111 u32 l34_filter_num:3;
112
113 /* WB RDES3 */
114 u32 pkt_len:14;
115 u32 rdes3_reserved:1;
116 u32 err_summary:15;
117 u32 err_l2_type:4;
118 u32 layer34_pkt_type:4;
119 u32 no_coagulation_pkt:1;
120 u32 in_seq_pkt:1;
121 u32 rss_valid:1;
122 u32 context_des_avail:1;
123 u32 last_desc:1;
124 u32 first_desc:1;
125 u32 recv_context_desc:1;
126 u32 own_bit:1;
127 } rx_wb_des23;
128 } rdes23;
129};
130
131/* Context descriptor structure */
132struct sxgbe_tx_ctxt_desc {
133 u32 tstamp_lo;
134 u32 tstamp_hi;
135 u32 maxseg_size:15;
136 u32 reserved1:1;
137 u32 ivlan_tag:16;
138 u32 vlan_tag:16;
139 u32 vltag_valid:1;
140 u32 ivlan_tag_valid:1;
141 u32 ivlan_tag_ctl:2;
142 u32 reserved2:3;
143 u32 ctxt_desc_err:1;
144 u32 reserved3:2;
145 u32 ostc:1;
146 u32 tcmssv:1;
147 u32 reserved4:2;
148 u32 ctxt_bit:1;
149 u32 own_bit:1;
150};
151
152struct sxgbe_rx_ctxt_desc {
153 u32 tstamp_lo;
154 u32 tstamp_hi;
155 u32 reserved1;
156 u32 ptp_msgtype:4;
157 u32 tstamp_available:1;
158 u32 ptp_rsp_err:1;
159 u32 tstamp_dropped:1;
160 u32 reserved2:23;
161 u32 rx_ctxt_desc:1;
162 u32 own_bit:1;
163};
164
165struct sxgbe_desc_ops {
166 /* DMA TX descriptor ring initialization */
167 void (*init_tx_desc)(struct sxgbe_tx_norm_desc *p);
168
169 /* Invoked by the xmit function to prepare the tx descriptor */
170 void (*tx_desc_enable_tse)(struct sxgbe_tx_norm_desc *p, u8 is_tse,
171 u32 total_hdr_len, u32 tcp_hdr_len,
172 u32 tcp_payload_len);
173
174 /* Assign buffer lengths for descriptor */
175 void (*prepare_tx_desc)(struct sxgbe_tx_norm_desc *p, u8 is_fd,
176 int buf1_len, int pkt_len, int cksum);
177
178 /* Set VLAN control information */
179 void (*tx_vlanctl_desc)(struct sxgbe_tx_norm_desc *p, int vlan_ctl);
180
181 /* Set the owner of the descriptor */
182 void (*set_tx_owner)(struct sxgbe_tx_norm_desc *p);
183
184 /* Get the owner of the descriptor */
185 int (*get_tx_owner)(struct sxgbe_tx_norm_desc *p);
186
187 /* Invoked by the xmit function to close the tx descriptor */
188 void (*close_tx_desc)(struct sxgbe_tx_norm_desc *p);
189
190 /* Clean the tx descriptor as soon as the tx irq is received */
191 void (*release_tx_desc)(struct sxgbe_tx_norm_desc *p);
192
193 /* Clear interrupt on tx frame completion. When this bit is
194 * set an interrupt happens as soon as the frame is transmitted
195 */
196 void (*clear_tx_ic)(struct sxgbe_tx_norm_desc *p);
197
198 /* Last tx segment reports the transmit status */
199 int (*get_tx_ls)(struct sxgbe_tx_norm_desc *p);
200
201 /* Get the buffer size from the descriptor */
202 int (*get_tx_len)(struct sxgbe_tx_norm_desc *p);
203
204 /* Set tx timestamp enable bit */
205 void (*tx_enable_tstamp)(struct sxgbe_tx_norm_desc *p);
206
207 /* get tx timestamp status */
208 int (*get_tx_timestamp_status)(struct sxgbe_tx_norm_desc *p);
209
210 /* TX Context Descriptor Specific */
211 void (*tx_ctxt_desc_set_ctxt)(struct sxgbe_tx_ctxt_desc *p);
212
213 /* Set the owner of the TX context descriptor */
214 void (*tx_ctxt_desc_set_owner)(struct sxgbe_tx_ctxt_desc *p);
215
216 /* Get the owner of the TX context descriptor */
217 int (*get_tx_ctxt_owner)(struct sxgbe_tx_ctxt_desc *p);
218
219 /* Set TX mss */
220 void (*tx_ctxt_desc_set_mss)(struct sxgbe_tx_ctxt_desc *p, int mss);
221
222 /* Get TX mss */
223 int (*tx_ctxt_desc_get_mss)(struct sxgbe_tx_ctxt_desc *p);
224
225 /* Set TX tcmssv */
226 void (*tx_ctxt_desc_set_tcmssv)(struct sxgbe_tx_ctxt_desc *p);
227
228 /* Reset TX ostc */
229 void (*tx_ctxt_desc_reset_ostc)(struct sxgbe_tx_ctxt_desc *p);
230
231 /* Set IVLAN information */
232 void (*tx_ctxt_desc_set_ivlantag)(struct sxgbe_tx_ctxt_desc *p,
233 int is_ivlanvalid, int ivlan_tag,
234 int ivlan_ctl);
235
236 /* Return IVLAN Tag */
237 int (*tx_ctxt_desc_get_ivlantag)(struct sxgbe_tx_ctxt_desc *p);
238
239 /* Set VLAN Tag */
240 void (*tx_ctxt_desc_set_vlantag)(struct sxgbe_tx_ctxt_desc *p,
241 int is_vlanvalid, int vlan_tag);
242
243 /* Return VLAN Tag */
244 int (*tx_ctxt_desc_get_vlantag)(struct sxgbe_tx_ctxt_desc *p);
245
246 /* Set Time stamp */
247 void (*tx_ctxt_set_tstamp)(struct sxgbe_tx_ctxt_desc *p,
248 u8 ostc_enable, u64 tstamp);
249
250 /* Close TX context descriptor */
251 void (*close_tx_ctxt_desc)(struct sxgbe_tx_ctxt_desc *p);
252
253 /* WB status of context descriptor */
254 int (*get_tx_ctxt_cde)(struct sxgbe_tx_ctxt_desc *p);
255
256 /* DMA RX descriptor ring initialization */
257 void (*init_rx_desc)(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
258 int mode, int end);
259
260 /* Get own bit */
261 int (*get_rx_owner)(struct sxgbe_rx_norm_desc *p);
262
263 /* Set own bit */
264 void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
265
266 /* Get the receive frame size */
267 int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
268
269 /* Return first Descriptor status */
270 int (*get_rx_fd_status)(struct sxgbe_rx_norm_desc *p);
271
272 /* Return last Descriptor status */
273 int (*get_rx_ld_status)(struct sxgbe_rx_norm_desc *p);
274
275 /* Return the reception status looking at the write-back fields */
276 int (*rx_wbstatus)(struct sxgbe_rx_norm_desc *p,
277 struct sxgbe_extra_stats *x, int *checksum);
278
279 /* Get own bit */
280 int (*get_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
281
282 /* Set own bit */
283 void (*set_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
284
285 /* Return the reception status looking at Context control information */
286 void (*rx_ctxt_wbstatus)(struct sxgbe_rx_ctxt_desc *p,
287 struct sxgbe_extra_stats *x);
288
289 /* Get rx timestamp status */
290 int (*get_rx_ctxt_tstamp_status)(struct sxgbe_rx_ctxt_desc *p);
291
292 /* Get timestamp value for rx, need to check this */
293 u64 (*get_timestamp)(struct sxgbe_rx_ctxt_desc *p);
294};
295
296const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void);
297
298#endif /* __SXGBE_DESC_H__ */
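
All four descriptor structs rely on the compiler laying out these bitfields to exactly SXGBE_DESC_SIZE_BYTES (16), since the DMA code computes ring addresses by that stride. A compile-time check along these lines (not part of this patch; BUILD_BUG_ON is the standard kernel helper) would catch any layout drift:

	static inline void sxgbe_desc_layout_check(void)
	{
		BUILD_BUG_ON(sizeof(struct sxgbe_tx_norm_desc) != SXGBE_DESC_SIZE_BYTES);
		BUILD_BUG_ON(sizeof(struct sxgbe_rx_norm_desc) != SXGBE_DESC_SIZE_BYTES);
		BUILD_BUG_ON(sizeof(struct sxgbe_tx_ctxt_desc) != SXGBE_DESC_SIZE_BYTES);
		BUILD_BUG_ON(sizeof(struct sxgbe_rx_ctxt_desc) != SXGBE_DESC_SIZE_BYTES);
	}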
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
new file mode 100644
index 000000000000..59d2d3976277
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
@@ -0,0 +1,372 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/delay.h>
13#include <linux/export.h>
14#include <linux/io.h>
15#include <linux/netdevice.h>
16#include <linux/phy.h>
17
18
19#include "sxgbe_common.h"
20#include "sxgbe_dma.h"
21#include "sxgbe_reg.h"
22#include "sxgbe_desc.h"
23
24/* DMA core initialization */
25static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
26{
27 int retry_count = 10;
28 u32 reg_val;
29
30 /* reset the DMA */
31 writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
32 while (retry_count--) {
33 if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
34 SXGBE_DMA_SOFT_RESET))
35 break;
36 mdelay(10);
37 }
38
39 if (retry_count < 0)
40 return -EBUSY;
41
42 reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
43
44 /* If fix_burst = 0, set UNDEF = 1 in the DMA_Sys_Mode register;
45 * if fix_burst = 1, set UNDEF = 0.
46 * burst_map is a bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256];
47 * it is programmed irrespective of the fix_burst value.
48 */
49 if (!fix_burst)
50 reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;
51
52 /* write burst len map */
53 reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);
54
55 writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
56
57 return 0;
58}
59
60static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
61 int fix_burst, int pbl, dma_addr_t dma_tx,
62 dma_addr_t dma_rx, int t_rsize, int r_rsize)
63{
64 u32 reg_val;
65 dma_addr_t dma_addr;
66
67 reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
68 /* set the pbl */
69 if (fix_burst) {
70 reg_val |= SXGBE_DMA_PBL_X8MODE;
71 writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
72 /* program the TX pbl */
73 reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
74 reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
75 writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
76 /* program the RX pbl */
77 reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
78 reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
79 writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
80 }
81
82 /* program desc registers */
83 writel(upper_32_bits(dma_tx),
84 ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
85 writel(lower_32_bits(dma_tx),
86 ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));
87
88 writel(upper_32_bits(dma_rx),
89 ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
90 writel(lower_32_bits(dma_rx),
91 ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));
92
93 /* program tail pointers */
94 /* assumption: upper 32 bits are constant and
95 * same as TX/RX desc list
96 */
97 dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
98 writel(lower_32_bits(dma_addr),
99 ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));
100
101 dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
102 writel(lower_32_bits(dma_addr),
103 ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));
104 /* program the ring sizes */
105 writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
106 writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));
107
108 /* Enable TX/RX interrupts */
109 writel(SXGBE_DMA_ENA_INT,
110 ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
111}
112
113static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
114{
115 u32 tx_config;
116
117 tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
118 tx_config |= SXGBE_TX_START_DMA;
119 writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
120}
121
122static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
123{
124 /* Enable TX/RX interrupts */
125 writel(SXGBE_DMA_ENA_INT,
126 ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
127}
128
129static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
130{
131 /* Disable TX/RX interrupts */
132 writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
133}
134
135static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
136{
137 int cnum;
138 u32 tx_ctl_reg;
139
140 for (cnum = 0; cnum < tchannels; cnum++) {
141 tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
142 tx_ctl_reg |= SXGBE_TX_ENABLE;
143 writel(tx_ctl_reg,
144 ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
145 }
146}
147
148static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
149{
150 u32 tx_ctl_reg;
151
152 tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
153 tx_ctl_reg |= SXGBE_TX_ENABLE;
154 writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
155}
156
157static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
158{
159 u32 tx_ctl_reg;
160
161 tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
162 tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
163 writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
164}
165
166static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
167{
168 int cnum;
169 u32 tx_ctl_reg;
170
171 for (cnum = 0; cnum < tchannels; cnum++) {
172 tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
173 tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
174 writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
175 }
176}
177
178static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
179{
180 int cnum;
181 u32 rx_ctl_reg;
182
183 for (cnum = 0; cnum < rchannels; cnum++) {
184 rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
185 rx_ctl_reg |= SXGBE_RX_ENABLE;
186 writel(rx_ctl_reg,
187 ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
188 }
189}
190
191static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
192{
193 int cnum;
194 u32 rx_ctl_reg;
195
196 for (cnum = 0; cnum < rchannels; cnum++) {
197 rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
198 rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
199 writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
200 }
201}
202
203static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
204 struct sxgbe_extra_stats *x)
205{
206 u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
207 u32 clear_val = 0;
208 u32 ret_val = 0;
209
210 /* TX Normal Interrupt Summary */
211 if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
212 x->normal_irq_n++;
213 if (int_status & SXGBE_DMA_INT_STATUS_TI) {
214 ret_val |= handle_tx;
215 x->tx_normal_irq_n++;
216 clear_val |= SXGBE_DMA_INT_STATUS_TI;
217 }
218
219 if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
220 x->tx_underflow_irq++;
221 ret_val |= tx_bump_tc;
222 clear_val |= SXGBE_DMA_INT_STATUS_TBU;
223 }
224 } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
225 /* TX Abnormal Interrupt Summary */
226 if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
227 ret_val |= tx_hard_error;
228 clear_val |= SXGBE_DMA_INT_STATUS_TPS;
229 x->tx_process_stopped_irq++;
230 }
231
232 if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
233 ret_val |= tx_hard_error;
234 x->fatal_bus_error_irq++;
235
236 /* Assumption: the FBE bit is the combination of
237 * all the bus access errors and is cleared when
238 * the respective error bits are cleared
239 */
240
241 /* check for actual cause */
242 if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
243 x->tx_read_transfer_err++;
244 clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
245 } else {
246 x->tx_write_transfer_err++;
247 }
248
249 if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
250 x->tx_desc_access_err++;
251 clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
252 } else {
253 x->tx_buffer_access_err++;
254 }
255
256 if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
257 x->tx_data_transfer_err++;
258 clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
259 }
260 }
261
262 /* context descriptor error */
263 if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
264 x->tx_ctxt_desc_err++;
265 clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
266 }
267 }
268
269 /* clear the served bits */
270 writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
271
272 return ret_val;
273}
274
275static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
276 struct sxgbe_extra_stats *x)
277{
278 u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
279 u32 clear_val = 0;
280 u32 ret_val = 0;
281
282 /* RX Normal Interrupt Summary */
283 if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
284 x->normal_irq_n++;
285 if (int_status & SXGBE_DMA_INT_STATUS_RI) {
286 ret_val |= handle_rx;
287 x->rx_normal_irq_n++;
288 clear_val |= SXGBE_DMA_INT_STATUS_RI;
289 }
290 } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
291 /* RX Abnormal Interrupt Summary */
292 if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
293 ret_val |= rx_bump_tc;
294 clear_val |= SXGBE_DMA_INT_STATUS_RBU;
295 x->rx_underflow_irq++;
296 }
297
298 if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
299 ret_val |= rx_hard_error;
300 clear_val |= SXGBE_DMA_INT_STATUS_RPS;
301 x->rx_process_stopped_irq++;
302 }
303
304 if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
305 ret_val |= rx_hard_error;
306 x->fatal_bus_error_irq++;
307
308			/* Assumption: the FBE bit is the combination of
309			 * all the bus access errors and is cleared when
310			 * the respective error bits are cleared
311 */
312
313 /* check for actual cause */
314 if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
315 x->rx_read_transfer_err++;
316 clear_val |= SXGBE_DMA_INT_STATUS_REB0;
317 } else {
318 x->rx_write_transfer_err++;
319 }
320
321 if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
322 x->rx_desc_access_err++;
323 clear_val |= SXGBE_DMA_INT_STATUS_REB1;
324 } else {
325 x->rx_buffer_access_err++;
326 }
327
328 if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
329 x->rx_data_transfer_err++;
330 clear_val |= SXGBE_DMA_INT_STATUS_REB2;
331 }
332 }
333 }
334
335 /* clear the served bits */
336 writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
337
338 return ret_val;
339}
340
341/* Program the HW RX Watchdog */
342static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
343{
344 u32 que_num;
345
346 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
347 writel(riwt,
348 ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
349 }
350}
351
352static const struct sxgbe_dma_ops sxgbe_dma_ops = {
353 .init = sxgbe_dma_init,
354 .cha_init = sxgbe_dma_channel_init,
355 .enable_dma_transmission = sxgbe_enable_dma_transmission,
356 .enable_dma_irq = sxgbe_enable_dma_irq,
357 .disable_dma_irq = sxgbe_disable_dma_irq,
358 .start_tx = sxgbe_dma_start_tx,
359 .start_tx_queue = sxgbe_dma_start_tx_queue,
360 .stop_tx = sxgbe_dma_stop_tx,
361 .stop_tx_queue = sxgbe_dma_stop_tx_queue,
362 .start_rx = sxgbe_dma_start_rx,
363 .stop_rx = sxgbe_dma_stop_rx,
364 .tx_dma_int_status = sxgbe_tx_dma_int_status,
365 .rx_dma_int_status = sxgbe_rx_dma_int_status,
366 .rx_watchdog = sxgbe_dma_rx_watchdog,
367};
368
369const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
370{
371 return &sxgbe_dma_ops;
372}
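
/* Usage sketch (illustrative): the rest of the driver grabs this ops
 * table once at setup time and then drives the DMA engine only through
 * it, e.g.:
 *
 *	priv->hw->dma = sxgbe_get_dma_ops();
 *	priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
 *
 * keeping sxgbe_main.c independent of the register-level helpers above.
 */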
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h
new file mode 100644
index 000000000000..bbf167efb60c
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h
@@ -0,0 +1,48 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_DMA_H__
13#define __SXGBE_DMA_H__
14
15/* forward declaration */
16struct sxgbe_extra_stats;
17
18#define SXGBE_DMA_BLENMAP_LSHIFT 1
19#define SXGBE_DMA_TXPBL_LSHIFT 16
20#define SXGBE_DMA_RXPBL_LSHIFT 16
21#define DEFAULT_DMA_PBL 8
22
23struct sxgbe_dma_ops {
24 /* DMA core initialization */
25 int (*init)(void __iomem *ioaddr, int fix_burst, int burst_map);
26 void (*cha_init)(void __iomem *ioaddr, int cha_num, int fix_burst,
27 int pbl, dma_addr_t dma_tx, dma_addr_t dma_rx,
28			 int t_rsize, int r_rsize);
29 void (*enable_dma_transmission)(void __iomem *ioaddr, int dma_cnum);
30 void (*enable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
31 void (*disable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
32 void (*start_tx)(void __iomem *ioaddr, int tchannels);
33 void (*start_tx_queue)(void __iomem *ioaddr, int dma_cnum);
34 void (*stop_tx)(void __iomem *ioaddr, int tchannels);
35 void (*stop_tx_queue)(void __iomem *ioaddr, int dma_cnum);
36 void (*start_rx)(void __iomem *ioaddr, int rchannels);
37 void (*stop_rx)(void __iomem *ioaddr, int rchannels);
38 int (*tx_dma_int_status)(void __iomem *ioaddr, int channel_no,
39 struct sxgbe_extra_stats *x);
40 int (*rx_dma_int_status)(void __iomem *ioaddr, int channel_no,
41 struct sxgbe_extra_stats *x);
42 /* Program the HW RX Watchdog */
43 void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt);
44};
45
46const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void);
47
48#endif /* __SXGBE_DMA_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
new file mode 100644
index 000000000000..1dce2b2e045b
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -0,0 +1,44 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/kernel.h>
16#include <linux/netdevice.h>
17#include <linux/phy.h>
18
19#include "sxgbe_common.h"
20
21struct sxgbe_stats {
22 char stat_string[ETH_GSTRING_LEN];
23 int sizeof_stat;
24 int stat_offset;
25};
26
27#define SXGBE_STAT(m) \
28{ \
29 #m, \
30 FIELD_SIZEOF(struct sxgbe_extra_stats, m), \
31 offsetof(struct sxgbe_priv_data, xstats.m) \
32}
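
/* For instance, SXGBE_STAT(tx_underflow_irq) would expand to
 * { "tx_underflow_irq",
 *   FIELD_SIZEOF(struct sxgbe_extra_stats, tx_underflow_irq),
 *   offsetof(struct sxgbe_priv_data, xstats.tx_underflow_irq) };
 * the table below is intentionally empty in this basic framework.
 */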
33
34static const struct sxgbe_stats sxgbe_gstrings_stats[] = {
35};
36#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats)
37
38static const struct ethtool_ops sxgbe_ethtool_ops = {
39};
40
41void sxgbe_set_ethtool_ops(struct net_device *netdev)
42{
43 SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops);
44}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
new file mode 100644
index 000000000000..75ba57cfe7c0
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -0,0 +1,2052 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/clk.h>
16#include <linux/crc32.h>
17#include <linux/dma-mapping.h>
18#include <linux/etherdevice.h>
19#include <linux/ethtool.h>
20#include <linux/if.h>
21#include <linux/if_ether.h>
22#include <linux/if_vlan.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/ip.h>
26#include <linux/kernel.h>
27#include <linux/mii.h>
28#include <linux/module.h>
29#include <linux/net_tstamp.h>
30#include <linux/netdevice.h>
31#include <linux/phy.h>
32#include <linux/platform_device.h>
33#include <linux/prefetch.h>
34#include <linux/skbuff.h>
35#include <linux/slab.h>
36#include <linux/tcp.h>
37#include <linux/sxgbe_platform.h>
38
39#include "sxgbe_common.h"
40#include "sxgbe_desc.h"
41#include "sxgbe_dma.h"
42#include "sxgbe_mtl.h"
43#include "sxgbe_reg.h"
44
45#define SXGBE_ALIGN(x) L1_CACHE_ALIGN(x)
46#define JUMBO_LEN 9000
47
48/* Module parameters */
49#define TX_TIMEO 5000
50#define DMA_TX_SIZE 512
51#define DMA_RX_SIZE 1024
52#define TC_DEFAULT 64
53#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
54/* The default timer value, as per the sxgbe specification, is 1 sec (1000 ms) */
55#define SXGBE_DEFAULT_LPI_TIMER 1000
56
57static int debug = -1;
58
59module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message level (-1: default, 0: no output, 16: all)");
60static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
61 NETIF_MSG_LINK | NETIF_MSG_IFUP |
62 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
63
64static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
65static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
66static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);
67
68#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
69
70/**
71 * sxgbe_clk_csr_set - dynamically set the MDC clock
72 * @priv: driver private structure
73 * Description: this is to dynamically set the MDC clock according to the csr
74 * clock input.
75 */
76static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
77{
78 u32 clk_rate = clk_get_rate(priv->sxgbe_clk);
79
80	/* assign the proper divider; it will be used during
81	 * MDIO communication
82 */
83 if (clk_rate < SXGBE_CSR_F_150M)
84 priv->clk_csr = SXGBE_CSR_100_150M;
85 else if (clk_rate <= SXGBE_CSR_F_250M)
86 priv->clk_csr = SXGBE_CSR_150_250M;
87 else if (clk_rate <= SXGBE_CSR_F_300M)
88 priv->clk_csr = SXGBE_CSR_250_300M;
89 else if (clk_rate <= SXGBE_CSR_F_350M)
90 priv->clk_csr = SXGBE_CSR_300_350M;
91 else if (clk_rate <= SXGBE_CSR_F_400M)
92 priv->clk_csr = SXGBE_CSR_350_400M;
93 else if (clk_rate <= SXGBE_CSR_F_500M)
94 priv->clk_csr = SXGBE_CSR_400_500M;
95}
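
/* For example, a 266 MHz CSR clock falls through the 150 MHz and
 * 250 MHz checks and matches "<= SXGBE_CSR_F_300M", so the divider for
 * the 250-300 MHz band (SXGBE_CSR_250_300M) would be used for MDIO
 * transactions.
 */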
96
97/* minimum number of free TX descriptors required to wake up TX process */
98#define SXGBE_TX_THRESH(x)	((x)->dma_tx_size / 4)
99
100static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
101{
102 return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
103}
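
/* sxgbe_tx_avail() works on free-running indices: e.g. with
 * tx_qsize = 512, dirty_tx = 100 and cur_tx = 300 it reports
 * 100 + 512 - 300 - 1 = 311 free descriptors.
 */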
104
105/**
106 * sxgbe_adjust_link
107 * @dev: net device structure
108 * Description: it adjusts the link parameters.
109 */
110static void sxgbe_adjust_link(struct net_device *dev)
111{
112 struct sxgbe_priv_data *priv = netdev_priv(dev);
113 struct phy_device *phydev = priv->phydev;
114 u8 new_state = 0;
115 u8 speed = 0xff;
116
117 if (!phydev)
118 return;
119
120	/* SXGBE does not support auto-negotiation or half-duplex
121	 * mode, so duplex changes are not handled here; only the
122	 * speed and the link status are
123 */
124 if (phydev->link) {
125 if (phydev->speed != priv->speed) {
126 new_state = 1;
127 switch (phydev->speed) {
128 case SPEED_10000:
129 speed = SXGBE_SPEED_10G;
130 break;
131 case SPEED_2500:
132 speed = SXGBE_SPEED_2_5G;
133 break;
134 case SPEED_1000:
135 speed = SXGBE_SPEED_1G;
136 break;
137 default:
138 netif_err(priv, link, dev,
139 "Speed (%d) not supported\n",
140 phydev->speed);
141 }
142
			/* don't program the MAC with an unsupported speed */
			if (speed != 0xff) {
143				priv->speed = phydev->speed;
144				priv->hw->mac->set_speed(priv->ioaddr, speed);
			}
145 }
146
147 if (!priv->oldlink) {
148 new_state = 1;
149 priv->oldlink = 1;
150 }
151 } else if (priv->oldlink) {
152 new_state = 1;
153 priv->oldlink = 0;
154 priv->speed = SPEED_UNKNOWN;
155 }
156
157	if (new_state && netif_msg_link(priv))
158 phy_print_status(phydev);
159}
160
161/**
162 * sxgbe_init_phy - PHY initialization
163 * @dev: net device structure
164 * Description: it initializes the driver's PHY state, and attaches the PHY
165 * to the mac driver.
166 * Return value:
167 * 0 on success
168 */
169static int sxgbe_init_phy(struct net_device *ndev)
170{
171 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
172 char bus_id[MII_BUS_ID_SIZE];
173 struct phy_device *phydev;
174 struct sxgbe_priv_data *priv = netdev_priv(ndev);
175 int phy_iface = priv->plat->interface;
176
177 /* assign default link status */
178 priv->oldlink = 0;
179 priv->speed = SPEED_UNKNOWN;
180 priv->oldduplex = DUPLEX_UNKNOWN;
181
182 if (priv->plat->phy_bus_name)
183 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
184 priv->plat->phy_bus_name, priv->plat->bus_id);
185 else
186 snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
187 priv->plat->bus_id);
188
189 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
190 priv->plat->phy_addr);
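	/* e.g. with phy_bus_name "sxgbe", bus_id 1 and phy_addr 0 this
	 * yields bus_id "sxgbe-1" and, via PHY_ID_FMT ("%s:%02x"),
	 * phy_id_fmt "sxgbe-1:00"
	 */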
191 netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);
192
193 phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);
194
195 if (IS_ERR(phydev)) {
196 netdev_err(ndev, "Could not attach to PHY\n");
197 return PTR_ERR(phydev);
198 }
199
200 /* Stop Advertising 1000BASE Capability if interface is not GMII */
201 if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
202 (phy_iface == PHY_INTERFACE_MODE_RMII))
203 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
204 SUPPORTED_1000baseT_Full);
205 if (phydev->phy_id == 0) {
206 phy_disconnect(phydev);
207 return -ENODEV;
208 }
209
210 netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
211 __func__, phydev->phy_id, phydev->link);
212
213 /* save phy device in private structure */
214 priv->phydev = phydev;
215
216 return 0;
217}
218
219/**
220 * sxgbe_clear_descriptors: clear descriptors
221 * @priv: driver private structure
222 * Description: this function is called to clear the tx and rx descriptors
223 * whether basic or extended descriptors are used.
224 */
225static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
226{
227 int i, j;
228 unsigned int txsize = priv->dma_tx_size;
229 unsigned int rxsize = priv->dma_rx_size;
230
231 /* Clear the Rx/Tx descriptors */
232 for (j = 0; j < SXGBE_RX_QUEUES; j++) {
233 for (i = 0; i < rxsize; i++)
234 priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
235 priv->use_riwt, priv->mode,
236 (i == rxsize - 1));
237 }
238
239 for (j = 0; j < SXGBE_TX_QUEUES; j++) {
240 for (i = 0; i < txsize; i++)
241 priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
242 }
243}
244
245static int sxgbe_init_rx_buffers(struct net_device *dev,
246 struct sxgbe_rx_norm_desc *p, int i,
247 unsigned int dma_buf_sz,
248 struct sxgbe_rx_queue *rx_ring)
249{
250 struct sxgbe_priv_data *priv = netdev_priv(dev);
251 struct sk_buff *skb;
252
253 skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
254 if (!skb)
255 return -ENOMEM;
256
257 rx_ring->rx_skbuff[i] = skb;
258 rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
259 dma_buf_sz, DMA_FROM_DEVICE);
260
261 if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
262 netdev_err(dev, "%s: DMA mapping error\n", __func__);
263 dev_kfree_skb_any(skb);
264 return -EINVAL;
265 }
266
267 p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];
268
269 return 0;
270}
271/**
272 * init_tx_ring - init the TX descriptor ring
273 * @dev: device structure
 * @queue_no: queue number
274 * @tx_ring: ring to be initialised
275 * @tx_rsize: ring size
276 * Description: this function initializes the DMA TX descriptors
277 */
278static int init_tx_ring(struct device *dev, u8 queue_no,
279 struct sxgbe_tx_queue *tx_ring, int tx_rsize)
280{
281	/* TX ring is not allocated */
282 if (!tx_ring) {
283 dev_err(dev, "No memory for TX queue of SXGBE\n");
284 return -ENOMEM;
285 }
286
287 /* allocate memory for TX descriptors */
288 tx_ring->dma_tx = dma_zalloc_coherent(dev,
289 tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
290 &tx_ring->dma_tx_phy, GFP_KERNEL);
291 if (!tx_ring->dma_tx)
292 return -ENOMEM;
293
294 /* allocate memory for TX skbuff array */
295 tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
296 sizeof(dma_addr_t), GFP_KERNEL);
297 if (!tx_ring->tx_skbuff_dma)
298 goto dmamem_err;
299
300 tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
301 sizeof(struct sk_buff *), GFP_KERNEL);
302
303 if (!tx_ring->tx_skbuff)
304 goto dmamem_err;
305
306 /* assign queue number */
307 tx_ring->queue_no = queue_no;
308
309	/* initialise counters */
310 tx_ring->dirty_tx = 0;
311 tx_ring->cur_tx = 0;
312
313	/* initialise TX queue lock */
314 spin_lock_init(&tx_ring->tx_lock);
315
316 return 0;
317
318dmamem_err:
319 dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
320 tx_ring->dma_tx, tx_ring->dma_tx_phy);
321 return -ENOMEM;
322}
323
324/**
325 * free_rx_ring - free the RX descriptor ring
326 * @dev: device structure
327 * @rx_ring: ring to be freed
328 * @rx_rsize: ring size
329 * Description: this function frees the DMA RX descriptor ring
330 */
331void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
332 int rx_rsize)
333{
334 dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
335 rx_ring->dma_rx, rx_ring->dma_rx_phy);
336 kfree(rx_ring->rx_skbuff_dma);
337 kfree(rx_ring->rx_skbuff);
338}
339
340/**
341 * init_rx_ring - init the RX descriptor ring
342 * @dev: net device structure
 * @queue_no: queue number
343 * @rx_ring: ring to be initialised
344 * @rx_rsize: ring size
345 * Description: this function initializes the DMA RX descriptors
346 */
347static int init_rx_ring(struct net_device *dev, u8 queue_no,
348 struct sxgbe_rx_queue *rx_ring, int rx_rsize)
349{
350 struct sxgbe_priv_data *priv = netdev_priv(dev);
351 int desc_index;
352 unsigned int bfsize = 0;
353	int ret = 0;
354
355 /* Set the max buffer size according to the MTU. */
356 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
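	/* e.g. with a 1500-byte MTU and NET_IP_ALIGN == 2 this is
	 * ALIGN(1500 + 14 + 4 + 2, 8) = 1520 bytes per receive buffer
	 */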
357
358 netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);
359
360	/* RX ring is not allocated */
361 if (rx_ring == NULL) {
362 netdev_err(dev, "No memory for RX queue\n");
363 goto error;
364 }
365
366 /* assign queue number */
367 rx_ring->queue_no = queue_no;
368
369 /* allocate memory for RX descriptors */
370 rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
371 rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
372 &rx_ring->dma_rx_phy, GFP_KERNEL);
373
374 if (rx_ring->dma_rx == NULL)
375 goto error;
376
377 /* allocate memory for RX skbuff array */
378 rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
379 sizeof(dma_addr_t), GFP_KERNEL);
380 if (rx_ring->rx_skbuff_dma == NULL)
381 goto dmamem_err;
382
383 rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
384 sizeof(struct sk_buff *), GFP_KERNEL);
385 if (rx_ring->rx_skbuff == NULL)
386 goto rxbuff_err;
387
388 /* initialise the buffers */
389 for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
390 struct sxgbe_rx_norm_desc *p;
391 p = rx_ring->dma_rx + desc_index;
392 ret = sxgbe_init_rx_buffers(dev, p, desc_index,
393 bfsize, rx_ring);
394 if (ret)
395 goto err_init_rx_buffers;
396 }
397
398	/* initialise counters */
399 rx_ring->cur_rx = 0;
400 rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
401 priv->dma_buf_sz = bfsize;
402
403 return 0;
404
405err_init_rx_buffers:
406	while (--desc_index >= 0) {
		/* unwind only the buffers mapped so far; calling
		 * free_rx_ring() here would free the whole ring once per
		 * buffer and again in the error paths below
		 */
407		dma_unmap_single(priv->device,
				 rx_ring->rx_skbuff_dma[desc_index],
				 bfsize, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_ring->rx_skbuff[desc_index]);
	}
408 kfree(rx_ring->rx_skbuff);
409rxbuff_err:
410 kfree(rx_ring->rx_skbuff_dma);
411dmamem_err:
412 dma_free_coherent(priv->device,
413 rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
414 rx_ring->dma_rx, rx_ring->dma_rx_phy);
415error:
416 return -ENOMEM;
417}
418/**
419 * free_tx_ring - free the TX descriptor ring
420 * @dev: device structure
421 * @tx_ring: ring to be freed
422 * @tx_rsize: ring size
423 * Description: this function frees the DMA TX descriptor ring
424 */
425void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
426 int tx_rsize)
427{
428 dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
429 tx_ring->dma_tx, tx_ring->dma_tx_phy);
430}
431
432/**
433 * init_dma_desc_rings - init the RX/TX descriptor rings
434 * @netd: net device structure
435 * Description: this function initializes the DMA RX/TX descriptors
436 * and allocates the socket buffers. It supports the chained and ring
437 * modes.
438 */
439static int init_dma_desc_rings(struct net_device *netd)
440{
441 int queue_num, ret;
442 struct sxgbe_priv_data *priv = netdev_priv(netd);
443 int tx_rsize = priv->dma_tx_size;
444 int rx_rsize = priv->dma_rx_size;
445
446 /* Allocate memory for queue structures and TX descs */
447 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
448 ret = init_tx_ring(priv->device, queue_num,
449 priv->txq[queue_num], tx_rsize);
450 if (ret) {
451 dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
452 goto txalloc_err;
453 }
454
455		/* save the private pointer in each ring; this
456		 * pointer is needed when cleaning the TX queue
457 */
458 priv->txq[queue_num]->priv_ptr = priv;
459 }
460
461 /* Allocate memory for queue structures and RX descs */
462 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
463 ret = init_rx_ring(netd, queue_num,
464 priv->rxq[queue_num], rx_rsize);
465 if (ret) {
466 netdev_err(netd, "RX DMA ring allocation failed!!\n");
467 goto rxalloc_err;
468 }
469
470		/* save the private pointer in each ring; this
471		 * pointer is needed when cleaning the RX queue
472 */
473 priv->rxq[queue_num]->priv_ptr = priv;
474 }
475
476 sxgbe_clear_descriptors(priv);
477
478 return 0;
479
480txalloc_err:
481 while (queue_num--)
482 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
483 return ret;
484
485rxalloc_err:
486 while (queue_num--)
487 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
488 return ret;
489}
490
491static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
492{
493 int dma_desc;
494 struct sxgbe_priv_data *priv = txqueue->priv_ptr;
495 int tx_rsize = priv->dma_tx_size;
496
497 for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
498 struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;
499
500 if (txqueue->tx_skbuff_dma[dma_desc])
501 dma_unmap_single(priv->device,
502 txqueue->tx_skbuff_dma[dma_desc],
503 priv->hw->desc->get_tx_len(tdesc),
504 DMA_TO_DEVICE);
505
506 dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
507 txqueue->tx_skbuff[dma_desc] = NULL;
508 txqueue->tx_skbuff_dma[dma_desc] = 0;
509 }
510}
511
513static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
514{
515 int queue_num;
516
517 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
518 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
519 tx_free_ring_skbufs(tqueue);
520 }
521}
522
523static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
524{
525 int queue_num;
526 int tx_rsize = priv->dma_tx_size;
527 int rx_rsize = priv->dma_rx_size;
528
529 /* Release the DMA TX buffers */
530 dma_free_tx_skbufs(priv);
531
532 /* Release the TX ring memory also */
533 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
534 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
535 }
536
537 /* Release the RX ring memory also */
538 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
539 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
540 }
541}
542
543static int txring_mem_alloc(struct sxgbe_priv_data *priv)
544{
545 int queue_num;
546
547 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
548 priv->txq[queue_num] = devm_kmalloc(priv->device,
549 sizeof(struct sxgbe_tx_queue), GFP_KERNEL);
550 if (!priv->txq[queue_num])
551 return -ENOMEM;
552 }
553
554 return 0;
555}
556
557static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
558{
559 int queue_num;
560
561 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
562 priv->rxq[queue_num] = devm_kmalloc(priv->device,
563 sizeof(struct sxgbe_rx_queue), GFP_KERNEL);
564 if (!priv->rxq[queue_num])
565 return -ENOMEM;
566 }
567
568 return 0;
569}
570
571/**
572 * sxgbe_mtl_operation_mode - HW MTL operation mode
573 * @priv: driver private structure
574 * Description: it sets the MTL operation mode: tx/rx MTL thresholds
575 * or Store-And-Forward capability.
576 */
577static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
578{
579 int queue_num;
580
581 /* TX/RX threshold control */
582 if (likely(priv->plat->force_sf_dma_mode)) {
583 /* set TC mode for TX QUEUES */
584 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
585 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
586 SXGBE_MTL_SFMODE);
587 priv->tx_tc = SXGBE_MTL_SFMODE;
588
589 /* set TC mode for RX QUEUES */
590 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
591 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
592 SXGBE_MTL_SFMODE);
593 priv->rx_tc = SXGBE_MTL_SFMODE;
594 } else if (unlikely(priv->plat->force_thresh_dma_mode)) {
595 /* set TC mode for TX QUEUES */
596 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
597 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
598 priv->tx_tc);
599 /* set TC mode for RX QUEUES */
600 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
601 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
602 priv->rx_tc);
603 } else {
604 pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
605 }
606}
607
608/**
609 * sxgbe_tx_queue_clean:
610 * @tqueue: transmit queue whose resources are to be reclaimed
611 * Description: it reclaims resources after transmission completes.
612 */
613static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
614{
615 struct sxgbe_priv_data *priv = tqueue->priv_ptr;
616 unsigned int tx_rsize = priv->dma_tx_size;
617 struct netdev_queue *dev_txq;
618 u8 queue_no = tqueue->queue_no;
619
620 dev_txq = netdev_get_tx_queue(priv->dev, queue_no);
621
622 spin_lock(&tqueue->tx_lock);
623
624 priv->xstats.tx_clean++;
625 while (tqueue->dirty_tx != tqueue->cur_tx) {
626 unsigned int entry = tqueue->dirty_tx % tx_rsize;
627 struct sk_buff *skb = tqueue->tx_skbuff[entry];
628 struct sxgbe_tx_norm_desc *p;
629
630 p = tqueue->dma_tx + entry;
631
632 /* Check if the descriptor is owned by the DMA. */
633 if (priv->hw->desc->get_tx_owner(p))
634 break;
635
636 if (netif_msg_tx_done(priv))
637 pr_debug("%s: curr %d, dirty %d\n",
638 __func__, tqueue->cur_tx, tqueue->dirty_tx);
639
640 if (likely(tqueue->tx_skbuff_dma[entry])) {
641 dma_unmap_single(priv->device,
642 tqueue->tx_skbuff_dma[entry],
643 priv->hw->desc->get_tx_len(p),
644 DMA_TO_DEVICE);
645 tqueue->tx_skbuff_dma[entry] = 0;
646 }
647
648 if (likely(skb)) {
649 dev_kfree_skb(skb);
650 tqueue->tx_skbuff[entry] = NULL;
651 }
652
653 priv->hw->desc->release_tx_desc(p);
654
655 tqueue->dirty_tx++;
656 }
657
658 /* wake up queue */
659 if (unlikely(netif_tx_queue_stopped(dev_txq) &&
660 sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
661 netif_tx_lock(priv->dev);
662 if (netif_tx_queue_stopped(dev_txq) &&
663 sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
664 if (netif_msg_tx_done(priv))
665 pr_debug("%s: restart transmit\n", __func__);
666 netif_tx_wake_queue(dev_txq);
667 }
668 netif_tx_unlock(priv->dev);
669 }
670
671 spin_unlock(&tqueue->tx_lock);
672}
673
674/**
675 * sxgbe_tx_all_clean:
676 * @priv: driver private structure
677 * Description: it reclaims resources on all queues after transmission completes.
678 */
679static void sxgbe_tx_all_clean(struct sxgbe_priv_data *priv)
680{
681 u8 queue_num;
682
683 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
684 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
685
686 sxgbe_tx_queue_clean(tqueue);
687 }
688}
689
690/**
691 * sxgbe_restart_tx_queue: irq tx error mng function
692 * @priv: driver private structure
 * @queue_num: queue to be restarted
693 * Description: it cleans the descriptors and restarts the transmission
694 * in case of errors.
695 */
696static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
697{
698 struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
699 struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
700 queue_num);
701
702 /* stop the queue */
703 netif_tx_stop_queue(dev_txq);
704
705 /* stop the tx dma */
706 priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);
707
708 /* free the skbuffs of the ring */
709 tx_free_ring_skbufs(tx_ring);
710
711 /* initalise counters */
712 tx_ring->cur_tx = 0;
713 tx_ring->dirty_tx = 0;
714
715 /* start the tx dma */
716 priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);
717
718 priv->dev->stats.tx_errors++;
719
720 /* wakeup the queue */
721 netif_tx_wake_queue(dev_txq);
722}
723
724/**
725 * sxgbe_reset_all_tx_queues: irq tx error mng function
726 * @priv: driver private structure
727 * Description: it cleans all the descriptors and
728 * restarts the transmission on all queues in case of errors.
729 */
730static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
731{
732 int queue_num;
733
734 /* On TX timeout of net device, resetting of all queues
735 * may not be proper way, revisit this later if needed
736 */
737 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
738 sxgbe_restart_tx_queue(priv, queue_num);
739}
740
741/**
742 * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register.
743 * @priv: driver private structure
744 * Description:
745 * the XMAC exposes capability registers that indicate the
746 * presence of the optional features/functions.
747 * These can also be used to override values passed through the
748 * platform data.
749 */
750static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
751{
752 int rval = 0;
753 struct sxgbe_hw_features *features = &priv->hw_cap;
754
755 /* Read First Capability Register CAP[0] */
756 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
757 if (rval) {
758 features->pmt_remote_wake_up =
759 SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
760 features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
761 features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
762 features->tx_csum_offload =
763 SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
764 features->rx_csum_offload =
765 SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
766 features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
767 features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
768 features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
769 }
770
771	/* Read Second Capability Register CAP[1] */
772 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
773 if (rval) {
774 features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
775 features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
776 features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
777 features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
778 features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
779 features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
780 features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
781 features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
782 features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
783 features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
784 }
785
786	/* Read Third Capability Register CAP[2] */
787 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
788 if (rval) {
789 features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
790 features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
791 features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
792 features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
793 features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
794 features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
795 }
796
797 return rval;
798}
799
800/**
801 * sxgbe_check_ether_addr: check if the MAC addr is valid
802 * @priv: driver private structure
803 * Description:
804 * it verifies that the MAC address is valid; if not, it reads the
805 * address back from the HW and, failing that, generates a random one
806 */
807static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
808{
809 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
810 priv->hw->mac->get_umac_addr((void __iomem *)
811 priv->ioaddr,
812 priv->dev->dev_addr, 0);
813 if (!is_valid_ether_addr(priv->dev->dev_addr))
814 eth_hw_addr_random(priv->dev);
815 }
816 dev_info(priv->device, "device MAC address %pM\n",
817 priv->dev->dev_addr);
818}
819
820/**
821 * sxgbe_init_dma_engine: DMA init.
822 * @priv: driver private structure
823 * Description:
824 * It inits the DMA invoking the specific SXGBE callback.
825 * Some DMA parameters can be passed from the platform;
826 * if they are not passed, sensible defaults are used.
827 */
828static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
829{
830 int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
831 int queue_num;
832
833 if (priv->plat->dma_cfg) {
834 pbl = priv->plat->dma_cfg->pbl;
835 fixed_burst = priv->plat->dma_cfg->fixed_burst;
836 burst_map = priv->plat->dma_cfg->burst_map;
837 }
838
839 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
840 priv->hw->dma->cha_init(priv->ioaddr, queue_num,
841 fixed_burst, pbl,
842 (priv->txq[queue_num])->dma_tx_phy,
843 (priv->rxq[queue_num])->dma_rx_phy,
844 priv->dma_tx_size, priv->dma_rx_size);
845
846 return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
847}
848
849/**
850 * sxgbe_init_mtl_engine: MTL init.
851 * @priv: driver private structure
852 * Description:
853 * It inits the MTL invoking the specific SXGBE callback.
854 */
855static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
856{
857 int queue_num;
858
859 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
860 priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
861 priv->hw_cap.tx_mtl_qsize);
862 priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
863 }
864}
865
866/**
867 * sxgbe_disable_mtl_engine: MTL disable.
868 * @priv: driver private structure
869 * Description:
870 * It disables the MTL queues by invoking the specific SXGBE callback.
871 */
872static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
873{
874 int queue_num;
875
876 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
877 priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
878}
879
881/**
882 * sxgbe_tx_timer: mitigation sw timer for tx.
883 * @data: data pointer
884 * Description:
885 * This is the timer handler that directly invokes sxgbe_tx_queue_clean.
886 */
887static void sxgbe_tx_timer(unsigned long data)
888{
889 struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data;
890 sxgbe_tx_queue_clean(p);
891}
892
893/**
894 * sxgbe_tx_init_coalesce: init tx mitigation options.
895 * @priv: driver private structure
896 * Description:
897 * This inits the transmit coalesce parameters: i.e. timer rate,
898 * timer handler and default threshold used for enabling the
899 * interrupt on completion bit.
900 */
901static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
902{
903 u8 queue_num;
904
905 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
906 struct sxgbe_tx_queue *p = priv->txq[queue_num];
907 p->tx_coal_frames = SXGBE_TX_FRAMES;
908 p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
909 init_timer(&p->txtimer);
910 p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
		/* the handler casts .data back to a struct sxgbe_tx_queue
		 * pointer, so pass the queue itself, not the address of
		 * its slot in the txq[] array
		 */
911		p->txtimer.data = (unsigned long)p;
912 p->txtimer.function = sxgbe_tx_timer;
913 add_timer(&p->txtimer);
914 }
915}
916
917static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
918{
919 u8 queue_num;
920
921 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
922 struct sxgbe_tx_queue *p = priv->txq[queue_num];
923 del_timer_sync(&p->txtimer);
924 }
925}
926
927/**
928 * sxgbe_open - open entry point of the driver
929 * @dev : pointer to the device structure.
930 * Description:
931 * This function is the open entry point of the driver.
932 * Return value:
933 * 0 on success, or an appropriate negative errno value (as defined
934 * in errno.h) on failure.
935 */
936static int sxgbe_open(struct net_device *dev)
937{
938 struct sxgbe_priv_data *priv = netdev_priv(dev);
939 int ret, queue_num;
940
941 clk_prepare_enable(priv->sxgbe_clk);
942
943 sxgbe_check_ether_addr(priv);
944
945 /* Init the phy */
946 ret = sxgbe_init_phy(dev);
947 if (ret) {
948 netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
949 __func__, ret);
950 goto phy_error;
951 }
952
953 /* Create and initialize the TX/RX descriptors chains. */
954 priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
955 priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
956 priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
957 priv->tx_tc = TC_DEFAULT;
958 priv->rx_tc = TC_DEFAULT;
959	ret = init_dma_desc_rings(dev);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptor ring init failed\n",
			   __func__);
		/* the rings are unwound by init_dma_desc_rings() itself */
		goto dma_desc_error;
	}
960
961 /* DMA initialization and SW reset */
962 ret = sxgbe_init_dma_engine(priv);
963 if (ret < 0) {
964 netdev_err(dev, "%s: DMA initialization failed\n", __func__);
965 goto init_error;
966 }
967
968 /* MTL initialization */
969 sxgbe_init_mtl_engine(priv);
970
971 /* Copy the MAC addr into the HW */
972 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
973
974 /* Initialize the MAC Core */
975 priv->hw->mac->core_init(priv->ioaddr);
976
977 /* Request the IRQ lines */
978 ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
979 IRQF_SHARED, dev->name, dev);
980 if (unlikely(ret < 0)) {
981 netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
982 __func__, priv->irq, ret);
983 goto init_error;
984 }
985
986 /* Request TX DMA irq lines */
987 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
988 ret = devm_request_irq(priv->device,
989 (priv->txq[queue_num])->irq_no,
990 sxgbe_tx_interrupt, 0,
991 dev->name, priv->txq[queue_num]);
992 if (unlikely(ret < 0)) {
993			netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
994				   __func__, (priv->txq[queue_num])->irq_no, ret);
995 goto init_error;
996 }
997 }
998
999 /* Request RX DMA irq lines */
1000 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
1001 ret = devm_request_irq(priv->device,
1002 (priv->rxq[queue_num])->irq_no,
1003 sxgbe_rx_interrupt, 0,
1004 dev->name, priv->rxq[queue_num]);
1005 if (unlikely(ret < 0)) {
1006			netdev_err(dev, "%s: ERROR: allocating RX IRQ %d (error: %d)\n",
1007				   __func__, (priv->rxq[queue_num])->irq_no, ret);
1008 goto init_error;
1009 }
1010 }
1011
1012 /* Enable the MAC Rx/Tx */
1013 priv->hw->mac->enable_tx(priv->ioaddr, true);
1014 priv->hw->mac->enable_rx(priv->ioaddr, true);
1015
1016 /* Set the HW DMA mode and the COE */
1017 sxgbe_mtl_operation_mode(priv);
1018
1019 /* Extra statistics */
1020 memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));
1021
1022 priv->xstats.tx_threshold = priv->tx_tc;
1023 priv->xstats.rx_threshold = priv->rx_tc;
1024
1025 /* Start the ball rolling... */
1026 netdev_dbg(dev, "DMA RX/TX processes started...\n");
1027 priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
1028 priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
1029
1030 if (priv->phydev)
1031 phy_start(priv->phydev);
1032
1033	/* initialise TX coalesce parameters */
1034 sxgbe_tx_init_coalesce(priv);
1035
1036 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1037 priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
1038 priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
1039 }
1040
1041 napi_enable(&priv->napi);
1042 netif_start_queue(dev);
1043
1044 return 0;
1045
1046init_error:
1047	free_dma_desc_resources(priv);
dma_desc_error:
1048	if (priv->phydev)
1049 phy_disconnect(priv->phydev);
1050phy_error:
1051 clk_disable_unprepare(priv->sxgbe_clk);
1052
1053 return ret;
1054}
1055
1056/**
1057 * sxgbe_release - close entry point of the driver
1058 * @dev : device pointer.
1059 * Description:
1060 * This is the stop entry point of the driver.
1061 */
1062static int sxgbe_release(struct net_device *dev)
1063{
1064 struct sxgbe_priv_data *priv = netdev_priv(dev);
1065
1066 /* Stop and disconnect the PHY */
1067 if (priv->phydev) {
1068 phy_stop(priv->phydev);
1069 phy_disconnect(priv->phydev);
1070 priv->phydev = NULL;
1071 }
1072
1073 netif_tx_stop_all_queues(dev);
1074
1075 napi_disable(&priv->napi);
1076
1077 /* delete TX timers */
1078 sxgbe_tx_del_timer(priv);
1079
1080 /* Stop TX/RX DMA and clear the descriptors */
1081 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
1082 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
1083
1084 /* disable MTL queue */
1085 sxgbe_disable_mtl_engine(priv);
1086
1087 /* Release and free the Rx/Tx resources */
1088 free_dma_desc_resources(priv);
1089
1090 /* Disable the MAC Rx/Tx */
1091 priv->hw->mac->enable_tx(priv->ioaddr, false);
1092 priv->hw->mac->enable_rx(priv->ioaddr, false);
1093
1094 clk_disable_unprepare(priv->sxgbe_clk);
1095
1096 return 0;
1097}
1098
1099/**
1100 * sxgbe_xmit: Tx entry point of the driver
1101 * @skb : the socket buffer
1102 * @dev : device pointer
1103 * Description : this is the tx entry point of the driver.
1104 * It programs the chain or the ring and supports oversized frames
1105 * It programs the descriptor ring and supports oversized frames
1106 * and the SG feature.
1107static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
1108{
1109 unsigned int entry, frag_num;
1110 struct netdev_queue *dev_txq;
1111	unsigned int txq_index = skb_get_queue_mapping(skb);
1112 struct sxgbe_priv_data *priv = netdev_priv(dev);
1113 unsigned int tx_rsize = priv->dma_tx_size;
1114 struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
1115 struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
1116 int nr_frags = skb_shinfo(skb)->nr_frags;
1117 int no_pagedlen = skb_headlen(skb);
1118 int is_jumbo = 0;
1119
1120 /* get the TX queue handle */
1121 dev_txq = netdev_get_tx_queue(dev, txq_index);
1122
1123 /* get the spinlock */
1124 spin_lock(&tqueue->tx_lock);
1125
1126 if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
1127 if (!netif_tx_queue_stopped(dev_txq)) {
1128 netif_tx_stop_queue(dev_txq);
1129 netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
1130 __func__, txq_index);
1131 }
1132 /* release the spin lock in case of BUSY */
1133 spin_unlock(&tqueue->tx_lock);
1134 return NETDEV_TX_BUSY;
1135 }
1136
1137 entry = tqueue->cur_tx % tx_rsize;
1138 tx_desc = tqueue->dma_tx + entry;
1139
1140 first_desc = tx_desc;
1141
1142 /* save the skb address */
1143 tqueue->tx_skbuff[entry] = skb;
1144
1145 if (!is_jumbo) {
1146 tx_desc->tdes01 = dma_map_single(priv->device, skb->data,
1147 no_pagedlen, DMA_TO_DEVICE);
1148		if (dma_mapping_error(priv->device, tx_desc->tdes01)) {
1149			pr_err("%s: TX dma mapping failed!!\n", __func__);
			/* drop the frame rather than handing an invalid
			 * DMA address to the hardware
			 */
			dev_kfree_skb_any(skb);
			tqueue->tx_skbuff[entry] = NULL;
			dev->stats.tx_dropped++;
			spin_unlock(&tqueue->tx_lock);
			return NETDEV_TX_OK;
		}
1150
1151 priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
1152 no_pagedlen, 0);
1153 }
1154
1155 for (frag_num = 0; frag_num < nr_frags; frag_num++) {
1156 const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
1157 int len = skb_frag_size(frag);
1158
1159 entry = (++tqueue->cur_tx) % tx_rsize;
1160 tx_desc = tqueue->dma_tx + entry;
1161 tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
1162 DMA_TO_DEVICE);
1163
1164 tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
1165 tqueue->tx_skbuff[entry] = NULL;
1166
1167 /* prepare the descriptor */
1168 priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
1169 len, 0);
1170 /* memory barrier to flush descriptor */
1171 wmb();
1172
1173 /* set the owner */
1174 priv->hw->desc->set_tx_owner(tx_desc);
1175 }
1176
1177 /* close the descriptors */
1178 priv->hw->desc->close_tx_desc(tx_desc);
1179
1180 /* memory barrier to flush descriptor */
1181 wmb();
1182
1183 tqueue->tx_count_frames += nr_frags + 1;
1184 if (tqueue->tx_count_frames > tqueue->tx_coal_frames) {
1185 priv->hw->desc->clear_tx_ic(tx_desc);
1186 priv->xstats.tx_reset_ic_bit++;
1187 mod_timer(&tqueue->txtimer,
1188 SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
1189 } else {
1190 tqueue->tx_count_frames = 0;
1191 }
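	/* e.g. assuming tx_coal_frames = 25: a frame with 3 fragments
	 * advances tx_count_frames by 4; once the counter passes 25,
	 * the IC bit is cleared on this descriptor and the coalescing
	 * timer is re-armed, deferring the completion interrupt
	 */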
1192
1193 /* set owner for first desc */
1194 priv->hw->desc->set_tx_owner(first_desc);
1195
1196 /* memory barrier to flush descriptor */
1197 wmb();
1198
1199 tqueue->cur_tx++;
1200
1201 /* display current ring */
1202 netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
1203 __func__, tqueue->cur_tx % tx_rsize,
1204 tqueue->dirty_tx % tx_rsize, entry,
1205 first_desc, nr_frags);
1206
1207 if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
1208 netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
1209 __func__);
1210 netif_tx_stop_queue(dev_txq);
1211 }
1212
1213 dev->stats.tx_bytes += skb->len;
1214
1215 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1216 tqueue->hwts_tx_en)) {
1217 /* declare that device is doing timestamping */
1218 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1219 priv->hw->desc->tx_enable_tstamp(first_desc);
1220 }
1221
1222 if (!tqueue->hwts_tx_en)
1223 skb_tx_timestamp(skb);
1224
1225 priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
1226
1227 spin_unlock(&tqueue->tx_lock);
1228
1229 return NETDEV_TX_OK;
1230}
1231
1232/**
1233 * sxgbe_rx_refill: refill used skb preallocated buffers
1234 * @priv: driver private structure
1235 * Description : this is to reallocate the skb for the reception process
1236 * that is based on zero-copy.
1237 */
1238static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
1239{
1240 unsigned int rxsize = priv->dma_rx_size;
1241 int bfsize = priv->dma_buf_sz;
1242 u8 qnum = priv->cur_rx_qnum;
1243
1244 for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
1245 priv->rxq[qnum]->dirty_rx++) {
1246 unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
1247 struct sxgbe_rx_norm_desc *p;
1248
1249 p = priv->rxq[qnum]->dma_rx + entry;
1250
1251 if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
1252 struct sk_buff *skb;
1253
1254 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
1255
1256 if (unlikely(skb == NULL))
1257 break;
1258
1259 priv->rxq[qnum]->rx_skbuff[entry] = skb;
1260 priv->rxq[qnum]->rx_skbuff_dma[entry] =
1261 dma_map_single(priv->device, skb->data, bfsize,
1262 DMA_FROM_DEVICE);
1263
1264 p->rdes23.rx_rd_des23.buf2_addr =
1265 priv->rxq[qnum]->rx_skbuff_dma[entry];
1266 }
1267
1268 /* Added memory barrier for RX descriptor modification */
1269 wmb();
1270 priv->hw->desc->set_rx_owner(p);
1271 /* Added memory barrier for RX descriptor modification */
1272 wmb();
1273 }
1274}
1275
1276/**
1277 * sxgbe_rx: receive the frames from the remote host
1278 * @priv: driver private structure
1279 * @limit: napi budget.
1280 * Description : this is the function called by the napi poll method.
1281 * It gets all the frames inside the ring.
1282 */
1283static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
1284{
1285 u8 qnum = priv->cur_rx_qnum;
1286 unsigned int rxsize = priv->dma_rx_size;
1287 unsigned int entry = priv->rxq[qnum]->cur_rx;
1288 unsigned int next_entry = 0;
1289 unsigned int count = 0;
1290
1291 while (count < limit) {
1292 struct sxgbe_rx_norm_desc *p;
1293 struct sk_buff *skb;
1294 int frame_len;
1295
1296 p = priv->rxq[qnum]->dma_rx + entry;
1297
1298 if (priv->hw->desc->get_rx_owner(p))
1299 break;
1300
1301 count++;
1302
1303 next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
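		/* e.g. with rxsize = 1024 and cur_rx previously 1023, the
		 * pre-increment takes cur_rx to 1024 and next_entry wraps
		 * to 1024 % 1024 = 0
		 */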
1304 prefetch(priv->rxq[qnum]->dma_rx + next_entry);
1305
1306		/* TODO: read the status of the incoming frame */
1307
1308 skb = priv->rxq[qnum]->rx_skbuff[entry];
1309
1310		if (unlikely(!skb)) {
1311			netdev_err(priv->dev, "rx descriptor is not consistent\n");
			/* stop here; dereferencing a NULL skb below would
			 * crash the receive path
			 */
			break;
		}
1312
1313 prefetch(skb->data - NET_IP_ALIGN);
1314 priv->rxq[qnum]->rx_skbuff[entry] = NULL;
1315
1316 frame_len = priv->hw->desc->get_rx_frame_len(p);
1317
1318 skb_put(skb, frame_len);
1319
1320 netif_receive_skb(skb);
1321
1322 entry = next_entry;
1323 }
1324
1325 sxgbe_rx_refill(priv);
1326
1327 return count;
1328}
1329
1330/**
1331 * sxgbe_poll - sxgbe poll method (NAPI)
1332 * @napi : pointer to the napi structure.
1333 * @budget : maximum number of packets that the current CPU can receive from
1334 * all interfaces.
1335 * Description :
1336 * To look at the incoming frames and clear the tx resources.
1337 */
1338static int sxgbe_poll(struct napi_struct *napi, int budget)
1339{
1340 struct sxgbe_priv_data *priv = container_of(napi,
1341 struct sxgbe_priv_data, napi);
1342 int work_done = 0;
1343 u8 qnum = priv->cur_rx_qnum;
1344
1345 priv->xstats.napi_poll++;
1346 /* first, clean the tx queues */
1347 sxgbe_tx_all_clean(priv);
1348
1349 work_done = sxgbe_rx(priv, budget);
1350 if (work_done < budget) {
1351 napi_complete(napi);
1352 priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
1353 }
1354
1355 return work_done;
1356}
1357
1358/**
1359 * sxgbe_tx_timeout
1360 * @dev : Pointer to net device structure
1361 * Description: this function is called when a packet transmission fails to
1362 * complete within a reasonable time. The driver will mark the error in the
1363 * netdev structure and arrange for the device to be reset to a sane state
1364 * in order to transmit a new packet.
1365 */
1366static void sxgbe_tx_timeout(struct net_device *dev)
1367{
1368 struct sxgbe_priv_data *priv = netdev_priv(dev);
1369
1370 sxgbe_reset_all_tx_queues(priv);
1371}
1372
1373/**
1374 * sxgbe_common_interrupt - main ISR
1375 * @irq: interrupt number.
1376 * @dev_id: to pass the net device pointer.
1377 * Description: this is the main driver interrupt service routine.
1378 * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
1379 * interrupts.
1380 */
1381static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
1382{
1383 return IRQ_HANDLED;
1384}
1385
1386/**
1387 * sxgbe_tx_interrupt - TX DMA ISR
1388 * @irq: interrupt number.
1389 * @dev_id: to pass the net device pointer.
1390 * Description: this is the tx dma interrupt service routine.
1391 */
1392static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
1393{
1394 int status;
1395 struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
1396 struct sxgbe_priv_data *priv = txq->priv_ptr;
1397
1398 /* get the channel status */
1399 status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
1400 &priv->xstats);
1401 /* check for normal path */
1402 if (likely((status & handle_tx)))
1403 napi_schedule(&priv->napi);
1404
1405 /* check for unrecoverable error */
1406 if (unlikely((status & tx_hard_error)))
1407 sxgbe_restart_tx_queue(priv, txq->queue_no);
1408
1409 /* check for TC configuration change */
1410 if (unlikely((status & tx_bump_tc) &&
1411 (priv->tx_tc != SXGBE_MTL_SFMODE) &&
1412 (priv->tx_tc < 512))) {
1413 /* step of TX TC is 32 till 128, otherwise 64 */
1414 priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
1415 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
1416 txq->queue_no, priv->tx_tc);
1417 priv->xstats.tx_threshold = priv->tx_tc;
1418 }
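	/* with TC_DEFAULT = 64 the threshold would step through
	 * 64 -> 96 -> 128 -> 192 -> 256 -> ... up to the 512 limit
	 */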
1419
1420 return IRQ_HANDLED;
1421}
1422
1423/**
1424 * sxgbe_rx_interrupt - RX DMA ISR
1425 * @irq: interrupt number.
1426 * @dev_id: to pass the net device pointer.
1427 * Description: this is the rx dma interrupt service routine.
1428 */
1429static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
1430{
1431 int status;
1432 struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
1433 struct sxgbe_priv_data *priv = rxq->priv_ptr;
1434
1435 /* get the channel status */
1436 status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
1437 &priv->xstats);
1438
1439 if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
1440 priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
1441 __napi_schedule(&priv->napi);
1442 }
1443
1444 /* check for TC configuration change */
1445 if (unlikely((status & rx_bump_tc) &&
1446 (priv->rx_tc != SXGBE_MTL_SFMODE) &&
1447 (priv->rx_tc < 128))) {
1448 /* step of TC is 32 */
1449 priv->rx_tc += 32;
1450 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
1451 rxq->queue_no, priv->rx_tc);
1452 priv->xstats.rx_threshold = priv->rx_tc;
1453 }
1454
1455 return IRQ_HANDLED;
1456}
1457
1458static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
1459{
1460 u64 val = readl(ioaddr + reg_lo);
1461
1462 val |= ((u64)readl(ioaddr + reg_hi)) << 32;
1463
1464 return val;
1465}
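
/* e.g. a low register of 0x00001000 and a high register of 0x00000002
 * combine into the 64-bit counter value 0x0000000200001000
 */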
1466
1468/* sxgbe_get_stats64 - entry point to see statistical information of device
1469 * @dev : device pointer.
1470 * @stats : pointer to hold all the statistical information of device.
1471 * Description:
1472 * This function is a driver entry point whenever ifconfig command gets
1473 * executed to see device statistics. Statistics are number of
1474 * bytes sent or received, errors occurred, etc.
1475 * Return value:
1476 * This function returns various statistical information of device.
1477 */
1478static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
1479 struct rtnl_link_stats64 *stats)
1480{
1481 struct sxgbe_priv_data *priv = netdev_priv(dev);
1482 void __iomem *ioaddr = priv->ioaddr;
1483 u64 count;
1484
1485 spin_lock(&priv->stats_lock);
1486	/* Freeze the counter registers before reading their values; otherwise
1487	 * they may get updated by hardware while we are reading them
1488 */
1489 writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);
1490
1491 stats->rx_bytes = sxgbe_get_stat64(ioaddr,
1492 SXGBE_MMC_RXOCTETLO_GCNT_REG,
1493 SXGBE_MMC_RXOCTETHI_GCNT_REG);
1494
1495 stats->rx_packets = sxgbe_get_stat64(ioaddr,
1496 SXGBE_MMC_RXFRAMELO_GBCNT_REG,
1497 SXGBE_MMC_RXFRAMEHI_GBCNT_REG);
1498
1499 stats->multicast = sxgbe_get_stat64(ioaddr,
1500 SXGBE_MMC_RXMULTILO_GCNT_REG,
1501 SXGBE_MMC_RXMULTIHI_GCNT_REG);
1502
1503 stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
1504 SXGBE_MMC_RXCRCERRLO_REG,
1505 SXGBE_MMC_RXCRCERRHI_REG);
1506
1507 stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
1508 SXGBE_MMC_RXLENERRLO_REG,
1509 SXGBE_MMC_RXLENERRHI_REG);
1510
1511 stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
1512 SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
1513 SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);
1514
1515 stats->tx_bytes = sxgbe_get_stat64(ioaddr,
1516 SXGBE_MMC_TXOCTETLO_GCNT_REG,
1517 SXGBE_MMC_TXOCTETHI_GCNT_REG);
1518
1519 count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
1520 SXGBE_MMC_TXFRAMEHI_GBCNT_REG);
1521
1522 stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
1523 SXGBE_MMC_TXFRAMEHI_GCNT_REG);
1524 stats->tx_errors = count - stats->tx_errors;
1525 stats->tx_packets = count;
1526 stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
1527 SXGBE_MMC_TXUFLWHI_GBCNT_REG);
1528 writel(0, ioaddr + SXGBE_MMC_CTL_REG);
1529 spin_unlock(&priv->stats_lock);
1530
1531 return stats;
1532}
1533
1534/* sxgbe_set_features - entry point to set offload features of the device.
1535 * @dev : device pointer.
1536 * @features : features which are required to be set.
1537 * Description:
1538 * This function is a driver entry point and called by Linux kernel whenever
1539 * any device features are set or reset by user.
1540 * Return value:
1541 * This function returns 0 after setting or resetting device features.
1542 */
1543static int sxgbe_set_features(struct net_device *dev,
1544 netdev_features_t features)
1545{
1546 struct sxgbe_priv_data *priv = netdev_priv(dev);
1547 netdev_features_t changed = dev->features ^ features;
1548 u32 ctrl;
1549
1550 if (changed & NETIF_F_RXCSUM) {
1551 ctrl = readl(priv->ioaddr + SXGBE_CORE_RX_CONFIG_REG);
1552 if (features & NETIF_F_RXCSUM)
1553 ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
1554 else
1555 ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
1556 writel(ctrl, priv->ioaddr + SXGBE_CORE_RX_CONFIG_REG);
1557 }
1558
1559 return 0;
1560}
1561
1562/* sxgbe_change_mtu - entry point to change MTU size for the device.
1563 * @dev : device pointer.
1564 * @new_mtu : the new MTU size for the device.
1565 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
1566 * to drive packet transmission. Ethernet has an MTU of 1500 octets
1567 * (ETH_DATA_LEN). This value can be changed with ifconfig.
1568 * Return value:
1569 * 0 on success, or an appropriate negative errno value (as defined
1570 * in errno.h) on failure.
1571 */
1572static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
1573{
1574 /* RFC 791, page 25, "Every internet module must be able to forward
1575 * a datagram of 68 octets without further fragmentation."
1576 */
1577 if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
1578 netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n",
1579 MIN_MTU, MAX_MTU);
1580 return -EINVAL;
1581 }
1582
1583 /* Return if the buffer sizes will not change */
1584 if (dev->mtu == new_mtu)
1585 return 0;
1586
1587 dev->mtu = new_mtu;
1588
1589 if (!netif_running(dev))
1590 return 0;
1591
1592	/* The receive ring buffer size has to be set based on the MTU. If the
1593	 * MTU changes, the receive ring buffers must be reinitialised, so
1594	 * bring the interface down and back up
1595 */
1596 sxgbe_release(dev);
1597 return sxgbe_open(dev);
1598}
1599
1600static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
1601 unsigned int reg_n)
1602{
1603 unsigned long data;
1604
1605 data = (addr[5] << 8) | addr[4];
1606	/* For MAC Addr registers we have to set the Address Enable (AE)
1607 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
1608 * is RO.
1609 */
1610 writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
1611 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
1612 writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
1613}
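
/* e.g. for the address 00:11:22:33:44:55 the high register is written
 * with 0x5544 (plus the AE bit) and the low register with 0x33221100
 */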
1614
1615/**
1616 * sxgbe_set_rx_mode - entry point for setting different receive mode of
1617 * a device. unicast, multicast addressing
1618 * @dev : pointer to the device structure
1619 * Description:
1620 * This function is a driver entry point which gets called by the kernel
1621 * whenever different receive mode like unicast, multicast and promiscuous
1622 * must be enabled/disabled.
1623 * Return value:
1624 * void.
1625 */
1626static void sxgbe_set_rx_mode(struct net_device *dev)
1627{
1628 struct sxgbe_priv_data *priv = netdev_priv(dev);
1629 void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
1630 unsigned int value = 0;
1631 u32 mc_filter[2];
1632 struct netdev_hw_addr *ha;
1633 int reg = 1;
1634
1635 netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
1636 __func__, netdev_mc_count(dev), netdev_uc_count(dev));
1637
1638 if (dev->flags & IFF_PROMISC) {
1639 value = SXGBE_FRAME_FILTER_PR;
1640
1641 } else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
1642 (dev->flags & IFF_ALLMULTI)) {
1643 value = SXGBE_FRAME_FILTER_PM; /* pass all multi */
1644 writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
1645 writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);
1646
1647 } else if (!netdev_mc_empty(dev)) {
1648 /* Hash filter for multicast */
1649 value = SXGBE_FRAME_FILTER_HMC;
1650
1651 memset(mc_filter, 0, sizeof(mc_filter));
1652 netdev_for_each_mc_addr(ha, dev) {
1653			/* The upper 6 bits of the calculated CRC are used to
1654			 * index the contents of the hash table
1655			 */
1656 int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
1657
1658 /* The most significant bit determines the register to
1659 * use (H/L) while the other 5 bits determine the bit
1660 * within the register.
1661 */
1662 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1663 }
1664 writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
1665 writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
1666 }
1667
1668 /* Handle multiple unicast addresses (perfect filtering) */
1669 if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES)
1670		/* Switch to promiscuous mode if more addresses are
1671		 * required than the perfect filter can hold
1672		 */
1673 value |= SXGBE_FRAME_FILTER_PR;
1674 else {
1675 netdev_for_each_uc_addr(ha, dev) {
1676 sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
1677 reg++;
1678 }
1679 }
1680#ifdef FRAME_FILTER_DEBUG
1681 /* Enable Receive all mode (to debug filtering_fail errors) */
1682 value |= SXGBE_FRAME_FILTER_RA;
1683#endif
1684 writel(value, ioaddr + SXGBE_FRAME_FILTER);
1685
1686 netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
1687 readl(ioaddr + SXGBE_FRAME_FILTER),
1688 readl(ioaddr + SXGBE_HASH_HIGH),
1689 readl(ioaddr + SXGBE_HASH_LOW));
1690}
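
The hash placement above is easier to follow in isolation. A stand-alone sketch (not part of the patch; crc32_le() and bitrev32() are re-implemented here with the same semantics as the kernel helpers) computing the bucket for one multicast address:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t crc32_le(uint32_t crc, const uint8_t *p, int len)
	{
		int i;

		while (len--) {
			crc ^= *p++;
			for (i = 0; i < 8; i++)
				crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
		}
		return crc;
	}

	static uint32_t bitrev32(uint32_t x)
	{
		uint32_t r = 0;
		int i;

		for (i = 0; i < 32; i++)
			r |= ((x >> i) & 1) << (31 - i);
		return r;
	}

	int main(void)
	{
		uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
		int bit_nr = bitrev32(~crc32_le(~0u, mc, 6)) >> 26;

		/* bit 5 selects HASH_HIGH/HASH_LOW, bits 0-4 the bit inside */
		printf("%s bit %d\n", (bit_nr >> 5) ? "HIGH" : "LOW", bit_nr & 31);
		return 0;
	}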
1691
1692/**
1693 * sxgbe_config - entry point for changing configuration mode passed on by
1694 * ifconfig
1695 * @dev : pointer to the device structure
1696 * @map : pointer to the device mapping structure
1697 * Description:
1698 * This function is a driver entry point which gets called by the kernel
1699 * whenever some device configuration is changed.
1700 * Return value:
1701 * This function returns 0 on success and an appropriate error otherwise.
1702 */
1703static int sxgbe_config(struct net_device *dev, struct ifmap *map)
1704{
1705 struct sxgbe_priv_data *priv = netdev_priv(dev);
1706
1707 /* Can't act on a running interface */
1708 if (dev->flags & IFF_UP)
1709 return -EBUSY;
1710
1711 /* Don't allow changing the I/O address */
1712 if (map->base_addr != (unsigned long)priv->ioaddr) {
1713 netdev_warn(dev, "can't change I/O address\n");
1714 return -EOPNOTSUPP;
1715 }
1716
1717 /* Don't allow changing the IRQ */
1718 if (map->irq != priv->irq) {
1719		netdev_warn(dev, "can't change IRQ number %d\n", priv->irq);
1720 return -EOPNOTSUPP;
1721 }
1722
1723 return 0;
1724}
1725
1726#ifdef CONFIG_NET_POLL_CONTROLLER
1727/**
1728 * sxgbe_poll_controller - entry point for polling receive by device
1729 * @dev : pointer to the device structure
1730 * Description:
1731 * This function is used by NETCONSOLE and other diagnostic tools
1732 * to allow network I/O with interrupts disabled.
1733 * Return value:
1734 * Void.
1735 */
1736static void sxgbe_poll_controller(struct net_device *dev)
1737{
1738 struct sxgbe_priv_data *priv = netdev_priv(dev);
1739
1740 disable_irq(priv->irq);
1741 sxgbe_rx_interrupt(priv->irq, dev);
1742 enable_irq(priv->irq);
1743}
1744#endif
1745
1746/* sxgbe_ioctl - Entry point for the Ioctl
1747 * @dev: Device pointer.
1748 * @rq: An IOCTL specific structure, that can contain a pointer to
1749 * a proprietary structure used to pass information to the driver.
1750 * @cmd: IOCTL command
1751 * Description:
1752 * Currently it only supports the phy_mii_ioctl(...) commands.
1753 */
1754static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1755{
1756 struct sxgbe_priv_data *priv = netdev_priv(dev);
1757 int ret = -EOPNOTSUPP;
1758
1759 if (!netif_running(dev))
1760 return -EINVAL;
1761
1762 switch (cmd) {
1763 case SIOCGMIIPHY:
1764 case SIOCGMIIREG:
1765 case SIOCSMIIREG:
1766 if (!priv->phydev)
1767 return -EINVAL;
1768 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
1769 break;
1770 default:
1771 break;
1772 }
1773
1774 return ret;
1775}
1776
1777static const struct net_device_ops sxgbe_netdev_ops = {
1778 .ndo_open = sxgbe_open,
1779 .ndo_start_xmit = sxgbe_xmit,
1780 .ndo_stop = sxgbe_release,
1781 .ndo_get_stats64 = sxgbe_get_stats64,
1782 .ndo_change_mtu = sxgbe_change_mtu,
1783 .ndo_set_features = sxgbe_set_features,
1784 .ndo_set_rx_mode = sxgbe_set_rx_mode,
1785 .ndo_tx_timeout = sxgbe_tx_timeout,
1786 .ndo_do_ioctl = sxgbe_ioctl,
1787 .ndo_set_config = sxgbe_config,
1788#ifdef CONFIG_NET_POLL_CONTROLLER
1789 .ndo_poll_controller = sxgbe_poll_controller,
1790#endif
1791 .ndo_set_mac_address = eth_mac_addr,
1792};
1793
1794/* Get the hardware ops */
1795void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
1796{
1797 ops_ptr->mac = sxgbe_get_core_ops();
1798 ops_ptr->desc = sxgbe_get_desc_ops();
1799 ops_ptr->dma = sxgbe_get_dma_ops();
1800 ops_ptr->mtl = sxgbe_get_mtl_ops();
1801
1802	/* set the MDIO communication Address/Data registers */
1803 ops_ptr->mii.addr = SXGBE_MDIO_SCMD_ADD_REG;
1804 ops_ptr->mii.data = SXGBE_MDIO_SCMD_DATA_REG;
1805
1806	/* Assign the default link settings:
1807	 * there are no SXGBE-defined default values to set in the registers,
1808	 * so port and duplex are assigned 0
1809	 */
1810 ops_ptr->link.port = 0;
1811 ops_ptr->link.duplex = 0;
1812 ops_ptr->link.speed = SXGBE_SPEED_10G;
1813}
1814
1815/**
1816 * sxgbe_hw_init - Init the MAC device
1817 * @priv: driver private structure
1818 * Description: this function checks the HW capability
1819 * (if supported) and sets the driver's features.
1820 */
1821static void sxgbe_hw_init(struct sxgbe_priv_data * const priv)
1822{
1823 u32 ctrl_ids;
1824
1825 priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
1826
1827 /* get the hardware ops */
1828 sxgbe_get_ops(priv->hw);
1829
1830 /* get the controller id */
1831 ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
1832 priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
1833 priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
1834 pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
1835 priv->hw->ctrl_uid, priv->hw->ctrl_id);
1836
1837 /* get the H/W features */
1838 if (!sxgbe_get_hw_features(priv))
1839 pr_info("Hardware features not found\n");
1840
1841 if (priv->hw_cap.tx_csum_offload)
1842 pr_info("TX Checksum offload supported\n");
1843
1844 if (priv->hw_cap.rx_csum_offload)
1845 pr_info("RX Checksum offload supported\n");
1846}
1847
1848/**
1849 * sxgbe_drv_probe
1850 * @device: device pointer
1851 * @plat_dat: platform data pointer
1852 * @addr: iobase memory address
1853 * Description: this is the main probe function. It allocates the net
1854 * device via alloc_etherdev_mqs and sets up the private structure.
1855 */
1856struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
1857 struct sxgbe_plat_data *plat_dat,
1858 void __iomem *addr)
1859{
1860 struct sxgbe_priv_data *priv;
1861 struct net_device *ndev;
1862 int ret;
1863
1864 ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
1865 SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
1866 if (!ndev)
1867 return NULL;
1868
1869 SET_NETDEV_DEV(ndev, device);
1870
1871 priv = netdev_priv(ndev);
1872 priv->device = device;
1873 priv->dev = ndev;
1874
1875 sxgbe_set_ethtool_ops(ndev);
1876 priv->plat = plat_dat;
1877 priv->ioaddr = addr;
1878
1879 /* Init MAC and get the capabilities */
1880 sxgbe_hw_init(priv);
1881
1882 /* allocate memory resources for Descriptor rings */
1883 ret = txring_mem_alloc(priv);
1884 if (ret)
1885 goto error_free_netdev;
1886
1887 ret = rxring_mem_alloc(priv);
1888 if (ret)
1889 goto error_free_netdev;
1890
1891 ndev->netdev_ops = &sxgbe_netdev_ops;
1892
1893 ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM;
1894 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
1895 ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);
1896
1897 /* assign filtering support */
1898 ndev->priv_flags |= IFF_UNICAST_FLT;
1899
1900 priv->msg_enable = netif_msg_init(debug, default_msg_level);
1901
1902	/* The Rx watchdog is available; enable it unless disabled in platform data */
1903 if (!priv->plat->riwt_off) {
1904 priv->use_riwt = 1;
1905 pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
1906 }
1907
1908 netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);
1909
1910 spin_lock_init(&priv->stats_lock);
1911
1912 priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
1913 if (IS_ERR(priv->sxgbe_clk)) {
1914 netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
1915 __func__);
1916 goto error_clk_get;
1917 }
1918
1919 /* If a specific clk_csr value is passed from the platform
1920 * this means that the CSR Clock Range selection cannot be
1921 * changed at run-time and it is fixed. Otherwise the driver will try
1922 * to set the MDC clock dynamically according to the actual csr clock
1923 * input.
1924 */
1925 if (!priv->plat->clk_csr)
1926 sxgbe_clk_csr_set(priv);
1927 else
1928 priv->clk_csr = priv->plat->clk_csr;
1929
1930 /* MDIO bus Registration */
1931 ret = sxgbe_mdio_register(ndev);
1932 if (ret < 0) {
1933 netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
1934 __func__, priv->plat->bus_id);
1935 goto error_mdio_register;
1936 }
1937
1938 ret = register_netdev(ndev);
1939 if (ret) {
1940 pr_err("%s: ERROR %i registering the device\n", __func__, ret);
1941 goto error_netdev_register;
1942 }
1943
1944 sxgbe_check_ether_addr(priv);
1945
1946 return priv;
1947
1948error_netdev_register:
1949	sxgbe_mdio_unregister(ndev);
1950error_mdio_register:
1951	clk_put(priv->sxgbe_clk);
1952error_clk_get:
1953	netif_napi_del(&priv->napi);
1954error_free_netdev:
1955	free_netdev(ndev);
1956 return NULL;
1957}
1958
1959/**
1960 * sxgbe_drv_remove
1961 * @ndev: net device pointer
1962 * Description: this function stops the TX/RX DMA processes, disables the
1963 * MAC RX/TX, changes the link status and releases the DMA descriptor rings.
1964 */
1965int sxgbe_drv_remove(struct net_device *ndev)
1966{
1967 struct sxgbe_priv_data *priv = netdev_priv(ndev);
1968
1969 netdev_info(ndev, "%s: removing driver\n", __func__);
1970
1971 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
1972 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
1973
1974 priv->hw->mac->enable_tx(priv->ioaddr, false);
1975 priv->hw->mac->enable_rx(priv->ioaddr, false);
1976
1977 netif_napi_del(&priv->napi);
1978
1979 sxgbe_mdio_unregister(ndev);
1980
1981 unregister_netdev(ndev);
1982
1983 free_netdev(ndev);
1984
1985 return 0;
1986}
1987
1988#ifdef CONFIG_PM
1989int sxgbe_suspend(struct net_device *ndev)
1990{
1991 return 0;
1992}
1993
1994int sxgbe_resume(struct net_device *ndev)
1995{
1996 return 0;
1997}
1998
1999int sxgbe_freeze(struct net_device *ndev)
2000{
2001 return -ENOSYS;
2002}
2003
2004int sxgbe_restore(struct net_device *ndev)
2005{
2006 return -ENOSYS;
2007}
2008#endif /* CONFIG_PM */
2009
2010/* The driver is configured as a platform driver */
2011static int __init sxgbe_init(void)
2012{
2013 int ret;
2014
2015 ret = sxgbe_register_platform();
2016 if (ret)
2017 goto err;
2018 return 0;
2019err:
2020 pr_err("driver registration failed\n");
2021 return ret;
2022}
2023
2024static void __exit sxgbe_exit(void)
2025{
2026 sxgbe_unregister_platform();
2027}
2028
2029module_init(sxgbe_init);
2030module_exit(sxgbe_exit);
2031
2032#ifndef MODULE
2033static int __init sxgbe_cmdline_opt(char *str)
2034{
2035 return 0;
2036}
2037
2038__setup("sxgbeeth=", sxgbe_cmdline_opt);
2039#endif /* MODULE */
2040
2041
2042
2043MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");
2044
2045MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
2046
2047MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
2048MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
2049MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
2050MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");
2051
2052MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
new file mode 100644
index 000000000000..b0eb0a2c52ca
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -0,0 +1,251 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/io.h>
16#include <linux/mii.h>
17#include <linux/netdevice.h>
18#include <linux/platform_device.h>
19#include <linux/phy.h>
20#include <linux/slab.h>
21#include <linux/sxgbe_platform.h>
22
23#include "sxgbe_common.h"
24#include "sxgbe_reg.h"
25
26#define SXGBE_SMA_WRITE_CMD 0x01 /* write command */
27#define SXGBE_SMA_PREAD_CMD	0x02 /* post-read increment address */
28#define SXGBE_SMA_READ_CMD 0x03 /* read command */
29#define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */
30#define SXGBE_MII_BUSY 0x00800000 /* mii busy */
31
32static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
33{
34 unsigned long fin_time = jiffies + 3 * HZ; /* 3 seconds */
35
36 while (!time_after(jiffies, fin_time)) {
37 if (!(readl(ioaddr + mii_data) & SXGBE_MII_BUSY))
38 return 0;
39 cpu_relax();
40 }
41
42 return -EBUSY;
43}
44
45static void sxgbe_mdio_ctrl_data(struct sxgbe_priv_data *sp, u32 cmd,
46 u16 phydata)
47{
48 u32 reg = phydata;
49
50 reg |= (cmd << 16) | SXGBE_SMA_SKIP_ADDRFRM |
51 ((sp->clk_csr & 0x7) << 19) | SXGBE_MII_BUSY;
52 writel(reg, sp->ioaddr + sp->hw->mii.data);
53}
54
55static void sxgbe_mdio_c45(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
56 int phyreg, u16 phydata)
57{
58 u32 reg;
59
60 /* set mdio address register */
61 reg = ((phyreg >> 16) & 0x1f) << 21;
62 reg |= (phyaddr << 16) | (phyreg & 0xffff);
63 writel(reg, sp->ioaddr + sp->hw->mii.addr);
64
65 sxgbe_mdio_ctrl_data(sp, cmd, phydata);
66}
67
68static void sxgbe_mdio_c22(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
69 int phyreg, u16 phydata)
70{
71 u32 reg;
72
73 writel(1 << phyaddr, sp->ioaddr + SXGBE_MDIO_CLAUSE22_PORT_REG);
74
75 /* set mdio address register */
76 reg = (phyaddr << 16) | (phyreg & 0x1f);
77 writel(reg, sp->ioaddr + sp->hw->mii.addr);
78
79 sxgbe_mdio_ctrl_data(sp, cmd, phydata);
80}
81
82static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
83 int phyreg, u16 phydata)
84{
85 const struct mii_regs *mii = &sp->hw->mii;
86 int rc;
87
88 rc = sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
89 if (rc < 0)
90 return rc;
91
92 if (phyreg & MII_ADDR_C45) {
93 sxgbe_mdio_c45(sp, cmd, phyaddr, phyreg, phydata);
94 } else {
95 /* Ports 0-3 only support C22. */
96 if (phyaddr >= 4)
97 return -ENODEV;
98
99 sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata);
100 }
101
102 return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
103}
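
For clarity, a short sketch (not part of the patch; mk_c45_regad() is a hypothetical helper) of how a clause-45 register address is encoded before reaching the hooks above: MII_ADDR_C45 is BIT(30) in linux/mdio.h, the MMD device sits in bits 16-20 and the register in bits 0-15, matching the decode in sxgbe_mdio_c45():

	#define MII_ADDR_C45	(1 << 30)

	static int mk_c45_regad(int devad, int regnum)
	{
		/* devad: MMD device (e.g. 1 = PMA/PMD); regnum: 16-bit register */
		return MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
	}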
104
105/**
106 * sxgbe_mdio_read
107 * @bus: points to the mii_bus structure
108 * @phyaddr: address of phy port
109 * @phyreg: register address within the PHY
110 * Description: this function is used for C45 and C22 MDIO read
111 */
112static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
113{
114 struct net_device *ndev = bus->priv;
115 struct sxgbe_priv_data *priv = netdev_priv(ndev);
116 int rc;
117
118 rc = sxgbe_mdio_access(priv, SXGBE_SMA_READ_CMD, phyaddr, phyreg, 0);
119 if (rc < 0)
120 return rc;
121
122 return readl(priv->ioaddr + priv->hw->mii.data) & 0xffff;
123}
124
125/**
126 * sxgbe_mdio_write
127 * @bus: points to the mii_bus structure
128 * @phyaddr: address of phy port
129 * @phyreg: register address within the PHY
130 * @phydata: data to be written into phy register
131 * Description: this function is used for C45 and C22 MDIO write
132 */
133static int sxgbe_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
134 u16 phydata)
135{
136 struct net_device *ndev = bus->priv;
137 struct sxgbe_priv_data *priv = netdev_priv(ndev);
138
139 return sxgbe_mdio_access(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg,
140 phydata);
141}
142
143int sxgbe_mdio_register(struct net_device *ndev)
144{
145 struct mii_bus *mdio_bus;
146 struct sxgbe_priv_data *priv = netdev_priv(ndev);
147 struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
148 int err, phy_addr;
149 int *irqlist;
150	bool act, phy_found = false;
151
152 /* allocate the new mdio bus */
153 mdio_bus = mdiobus_alloc();
154 if (!mdio_bus) {
155 netdev_err(ndev, "%s: mii bus allocation failed\n", __func__);
156 return -ENOMEM;
157 }
158
159 if (mdio_data->irqs)
160 irqlist = mdio_data->irqs;
161 else
162 irqlist = priv->mii_irq;
163
164 /* assign mii bus fields */
165 mdio_bus->name = "samsxgbe";
166 mdio_bus->read = &sxgbe_mdio_read;
167 mdio_bus->write = &sxgbe_mdio_write;
168 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
169 mdio_bus->name, priv->plat->bus_id);
170 mdio_bus->priv = ndev;
171 mdio_bus->phy_mask = mdio_data->phy_mask;
172 mdio_bus->parent = priv->device;
173
174 /* register with kernel subsystem */
175 err = mdiobus_register(mdio_bus);
176 if (err != 0) {
177 netdev_err(ndev, "mdiobus register failed\n");
178 goto mdiobus_err;
179 }
180
181 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
182 struct phy_device *phy = mdio_bus->phy_map[phy_addr];
183
184 if (phy) {
185 char irq_num[4];
186 char *irq_str;
187 /* If an IRQ was provided to be assigned after
188 * the bus probe, do it here.
189 */
190 if ((mdio_data->irqs == NULL) &&
191 (mdio_data->probed_phy_irq > 0)) {
192 irqlist[phy_addr] = mdio_data->probed_phy_irq;
193 phy->irq = mdio_data->probed_phy_irq;
194 }
195
196 /* If we're going to bind the MAC to this PHY bus,
197 * and no PHY number was provided to the MAC,
198 * use the one probed here.
199 */
200 if (priv->plat->phy_addr == -1)
201 priv->plat->phy_addr = phy_addr;
202
203 act = (priv->plat->phy_addr == phy_addr);
204 switch (phy->irq) {
205 case PHY_POLL:
206 irq_str = "POLL";
207 break;
208 case PHY_IGNORE_INTERRUPT:
209 irq_str = "IGNORE";
210 break;
211 default:
212 sprintf(irq_num, "%d", phy->irq);
213 irq_str = irq_num;
214 break;
215 }
216			netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", phy->phy_id,
217				    phy_addr, irq_str, dev_name(&phy->dev), act ? " active" : "");
218			phy_found = true;
219		}
220 }
221
222	if (!phy_found) {
223		netdev_err(ndev, "PHY not found\n");
224		err = -ENODEV;
225		mdiobus_unregister(mdio_bus);
226		goto mdiobus_err;
227	}
228
229 priv->mii = mdio_bus;
230
231 return 0;
232
233mdiobus_err:
234 mdiobus_free(mdio_bus);
235 return err;
236}
237
238int sxgbe_mdio_unregister(struct net_device *ndev)
239{
240 struct sxgbe_priv_data *priv = netdev_priv(ndev);
241
242 if (!priv->mii)
243 return 0;
244
245 mdiobus_unregister(priv->mii);
246 priv->mii->priv = NULL;
247 mdiobus_free(priv->mii);
248 priv->mii = NULL;
249
250 return 0;
251}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
new file mode 100644
index 000000000000..324681c2bb74
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
@@ -0,0 +1,254 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/io.h>
16#include <linux/errno.h>
17#include <linux/export.h>
18#include <linux/jiffies.h>
19
20#include "sxgbe_mtl.h"
21#include "sxgbe_reg.h"
22
23static void sxgbe_mtl_init(void __iomem *ioaddr, unsigned int etsalg,
24 unsigned int raa)
25{
26 u32 reg_val;
27
28 reg_val = readl(ioaddr + SXGBE_MTL_OP_MODE_REG);
29 reg_val &= ETS_RST;
30
31	/* ETS Algorithm */
32 switch (etsalg & SXGBE_MTL_OPMODE_ESTMASK) {
33 case ETS_WRR:
34 reg_val &= ETS_WRR;
35 break;
36 case ETS_WFQ:
37 reg_val |= ETS_WFQ;
38 break;
39 case ETS_DWRR:
40 reg_val |= ETS_DWRR;
41 break;
42 }
43 writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
44
45 switch (raa & SXGBE_MTL_OPMODE_RAAMASK) {
46 case RAA_SP:
47 reg_val &= RAA_SP;
48 break;
49 case RAA_WSP:
50 reg_val |= RAA_WSP;
51 break;
52 }
53 writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
54}
55
56/* Dynamic DMA channel mapping for RX queues */
57static void sxgbe_mtl_dma_dm_rxqueue(void __iomem *ioaddr)
58{
59 writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP0_REG);
60 writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP1_REG);
61 writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP2_REG);
62}
63
64static void sxgbe_mtl_set_txfifosize(void __iomem *ioaddr, int queue_num,
65 int queue_fifo)
66{
67 u32 fifo_bits, reg_val;
68
69 /* 0 means 256 bytes */
70 fifo_bits = (queue_fifo / SXGBE_MTL_TX_FIFO_DIV) - 1;
71 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
72 reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
73 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
74}
75
76static void sxgbe_mtl_set_rxfifosize(void __iomem *ioaddr, int queue_num,
77 int queue_fifo)
78{
79 u32 fifo_bits, reg_val;
80
81 /* 0 means 256 bytes */
82	fifo_bits = (queue_fifo / SXGBE_MTL_RX_FIFO_DIV) - 1;
83 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
84 reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
85 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
86}
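
A quick sketch of the FIFO-size encoding shared by the two helpers above (not part of the patch): the register field stores (size / 256) - 1, so a 4 KiB per-queue FIFO is programmed as 15.

	static unsigned int sxgbe_fifo_encode(unsigned int bytes)
	{
		/* assumes bytes is a non-zero multiple of 256 */
		return bytes / 256 - 1;	/* 256 -> 0, 4096 -> 15 */
	}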
87
88static void sxgbe_mtl_enable_txqueue(void __iomem *ioaddr, int queue_num)
89{
90 u32 reg_val;
91
92 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
93 reg_val |= SXGBE_MTL_ENABLE_QUEUE;
94 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
95}
96
97static void sxgbe_mtl_disable_txqueue(void __iomem *ioaddr, int queue_num)
98{
99 u32 reg_val;
100
101 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
102 reg_val &= ~SXGBE_MTL_ENABLE_QUEUE;
103 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
104}
105
106static void sxgbe_mtl_fc_active(void __iomem *ioaddr, int queue_num,
107 int threshold)
108{
109 u32 reg_val;
110
111 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
112 reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_ACTIVE);
113 reg_val |= (threshold << RX_FC_ACTIVE);
114
115 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
116}
117
118static void sxgbe_mtl_fc_enable(void __iomem *ioaddr, int queue_num)
119{
120 u32 reg_val;
121
122 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
123 reg_val |= SXGBE_MTL_ENABLE_FC;
124 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
125}
126
127static void sxgbe_mtl_fc_deactive(void __iomem *ioaddr, int queue_num,
128 int threshold)
129{
130 u32 reg_val;
131
132 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
133 reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_DEACTIVE);
134 reg_val |= (threshold << RX_FC_DEACTIVE);
135
136 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
137}
138
139static void sxgbe_mtl_fep_enable(void __iomem *ioaddr, int queue_num)
140{
141 u32 reg_val;
142
143 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
144 reg_val |= SXGBE_MTL_RXQ_OP_FEP;
145
146 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
147}
148
149static void sxgbe_mtl_fep_disable(void __iomem *ioaddr, int queue_num)
150{
151 u32 reg_val;
152
153 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
154 reg_val &= ~(SXGBE_MTL_RXQ_OP_FEP);
155
156 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
157}
158
159static void sxgbe_mtl_fup_enable(void __iomem *ioaddr, int queue_num)
160{
161 u32 reg_val;
162
163 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
164 reg_val |= SXGBE_MTL_RXQ_OP_FUP;
165
166 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
167}
168
169static void sxgbe_mtl_fup_disable(void __iomem *ioaddr, int queue_num)
170{
171 u32 reg_val;
172
173 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
174 reg_val &= ~(SXGBE_MTL_RXQ_OP_FUP);
175
176 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
177}
178
179
180static void sxgbe_set_tx_mtl_mode(void __iomem *ioaddr, int queue_num,
181 int tx_mode)
182{
183 u32 reg_val;
184
185 reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
186 /* TX specific MTL mode settings */
187 if (tx_mode == SXGBE_MTL_SFMODE) {
188 reg_val |= SXGBE_MTL_SFMODE;
189 } else {
190 /* set the TTC values */
191 if (tx_mode <= 64)
192 reg_val |= MTL_CONTROL_TTC_64;
193 else if (tx_mode <= 96)
194 reg_val |= MTL_CONTROL_TTC_96;
195 else if (tx_mode <= 128)
196 reg_val |= MTL_CONTROL_TTC_128;
197 else if (tx_mode <= 192)
198 reg_val |= MTL_CONTROL_TTC_192;
199 else if (tx_mode <= 256)
200 reg_val |= MTL_CONTROL_TTC_256;
201 else if (tx_mode <= 384)
202 reg_val |= MTL_CONTROL_TTC_384;
203 else
204 reg_val |= MTL_CONTROL_TTC_512;
205 }
206
207 /* write into TXQ operation register */
208 writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
209}
210
211static void sxgbe_set_rx_mtl_mode(void __iomem *ioaddr, int queue_num,
212 int rx_mode)
213{
214 u32 reg_val;
215
216 reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
217 /* RX specific MTL mode settings */
218 if (rx_mode == SXGBE_RX_MTL_SFMODE) {
219 reg_val |= SXGBE_RX_MTL_SFMODE;
220 } else {
221 if (rx_mode <= 64)
222 reg_val |= MTL_CONTROL_RTC_64;
223 else if (rx_mode <= 96)
224 reg_val |= MTL_CONTROL_RTC_96;
225 else if (rx_mode <= 128)
226 reg_val |= MTL_CONTROL_RTC_128;
227 }
228
229 /* write into RXQ operation register */
230 writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
231}
232
233static const struct sxgbe_mtl_ops mtl_ops = {
234 .mtl_set_txfifosize = sxgbe_mtl_set_txfifosize,
235 .mtl_set_rxfifosize = sxgbe_mtl_set_rxfifosize,
236 .mtl_enable_txqueue = sxgbe_mtl_enable_txqueue,
237 .mtl_disable_txqueue = sxgbe_mtl_disable_txqueue,
238 .mtl_dynamic_dma_rxqueue = sxgbe_mtl_dma_dm_rxqueue,
239 .set_tx_mtl_mode = sxgbe_set_tx_mtl_mode,
240 .set_rx_mtl_mode = sxgbe_set_rx_mtl_mode,
241 .mtl_init = sxgbe_mtl_init,
242 .mtl_fc_active = sxgbe_mtl_fc_active,
243 .mtl_fc_deactive = sxgbe_mtl_fc_deactive,
244 .mtl_fc_enable = sxgbe_mtl_fc_enable,
245 .mtl_fep_enable = sxgbe_mtl_fep_enable,
246 .mtl_fep_disable = sxgbe_mtl_fep_disable,
247 .mtl_fup_enable = sxgbe_mtl_fup_enable,
248 .mtl_fup_disable = sxgbe_mtl_fup_disable
249};
250
251const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void)
252{
253 return &mtl_ops;
254}
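
Callers never invoke the sxgbe_mtl_* helpers directly; everything goes through the ops table. A hypothetical call site (sketch only; the queue number and FIFO size are made up):

	#include "sxgbe_mtl.h"

	static void example_mtl_setup(void __iomem *ioaddr)
	{
		const struct sxgbe_mtl_ops *mtl = sxgbe_get_mtl_ops();

		mtl->mtl_init(ioaddr, ETS_WRR, RAA_SP);
		mtl->mtl_set_txfifosize(ioaddr, 0, 4096);
		mtl->mtl_enable_txqueue(ioaddr, 0);
	}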
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h
new file mode 100644
index 000000000000..7e4810c4137e
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h
@@ -0,0 +1,104 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_MTL_H__
13#define __SXGBE_MTL_H__
14
15#define SXGBE_MTL_OPMODE_ESTMASK 0x3
16#define SXGBE_MTL_OPMODE_RAAMASK 0x1
17#define SXGBE_MTL_FCMASK 0x7
18#define SXGBE_MTL_TX_FIFO_DIV 256
19#define SXGBE_MTL_RX_FIFO_DIV 256
20
21#define SXGBE_MTL_RXQ_OP_FEP BIT(4)
22#define SXGBE_MTL_RXQ_OP_FUP BIT(3)
23#define SXGBE_MTL_ENABLE_FC 0x80
24
25#define ETS_WRR 0xFFFFFF9F
26#define ETS_RST 0xFFFFFF9F
27#define ETS_WFQ 0x00000020
28#define ETS_DWRR 0x00000040
29#define RAA_SP 0xFFFFFFFB
30#define RAA_WSP 0x00000004
31
32#define RX_QUEUE_DYNAMIC 0x80808080
33#define RX_FC_ACTIVE 8
34#define RX_FC_DEACTIVE 13
35
36enum ttc_control {
37 MTL_CONTROL_TTC_64 = 0x00000000,
38 MTL_CONTROL_TTC_96 = 0x00000020,
39 MTL_CONTROL_TTC_128 = 0x00000030,
40 MTL_CONTROL_TTC_192 = 0x00000040,
41 MTL_CONTROL_TTC_256 = 0x00000050,
42 MTL_CONTROL_TTC_384 = 0x00000060,
43 MTL_CONTROL_TTC_512 = 0x00000070,
44};
45
46enum rtc_control {
47 MTL_CONTROL_RTC_64 = 0x00000000,
48 MTL_CONTROL_RTC_96 = 0x00000002,
49 MTL_CONTROL_RTC_128 = 0x00000003,
50};
51
52enum flow_control_th {
53 MTL_FC_FULL_1K = 0x00000000,
54 MTL_FC_FULL_2K = 0x00000001,
55 MTL_FC_FULL_4K = 0x00000002,
56 MTL_FC_FULL_5K = 0x00000003,
57 MTL_FC_FULL_6K = 0x00000004,
58 MTL_FC_FULL_8K = 0x00000005,
59 MTL_FC_FULL_16K = 0x00000006,
60 MTL_FC_FULL_24K = 0x00000007,
61};
62
63struct sxgbe_mtl_ops {
64 void (*mtl_init)(void __iomem *ioaddr, unsigned int etsalg,
65 unsigned int raa);
66
67	void (*mtl_set_txfifosize)(void __iomem *ioaddr, int queue_num,
68				   int queue_fifo);
69
70 void (*mtl_set_rxfifosize)(void __iomem *ioaddr, int queue_num,
71 int queue_fifo);
72
73 void (*mtl_enable_txqueue)(void __iomem *ioaddr, int queue_num);
74
75 void (*mtl_disable_txqueue)(void __iomem *ioaddr, int queue_num);
76
77 void (*set_tx_mtl_mode)(void __iomem *ioaddr, int queue_num,
78 int tx_mode);
79
80 void (*set_rx_mtl_mode)(void __iomem *ioaddr, int queue_num,
81 int rx_mode);
82
83 void (*mtl_dynamic_dma_rxqueue)(void __iomem *ioaddr);
84
85 void (*mtl_fc_active)(void __iomem *ioaddr, int queue_num,
86 int threshold);
87
88 void (*mtl_fc_deactive)(void __iomem *ioaddr, int queue_num,
89 int threshold);
90
91 void (*mtl_fc_enable)(void __iomem *ioaddr, int queue_num);
92
93 void (*mtl_fep_enable)(void __iomem *ioaddr, int queue_num);
94
95 void (*mtl_fep_disable)(void __iomem *ioaddr, int queue_num);
96
97 void (*mtl_fup_enable)(void __iomem *ioaddr, int queue_num);
98
99 void (*mtl_fup_disable)(void __iomem *ioaddr, int queue_num);
100};
101
102const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void);
103
104#endif /* __SXGBE_MTL_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
new file mode 100644
index 000000000000..f5a9de710052
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -0,0 +1,253 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/etherdevice.h>
16#include <linux/io.h>
17#include <linux/module.h>
18#include <linux/netdevice.h>
19#include <linux/of.h>
20#include <linux/of_irq.h>
21#include <linux/of_net.h>
22#include <linux/phy.h>
23#include <linux/platform_device.h>
24#include <linux/sxgbe_platform.h>
25
26#include "sxgbe_common.h"
27#include "sxgbe_reg.h"
28
29#ifdef CONFIG_OF
30static int sxgbe_probe_config_dt(struct platform_device *pdev,
31 struct sxgbe_plat_data *plat,
32 const char **mac)
33{
34 struct device_node *np = pdev->dev.of_node;
35 struct sxgbe_dma_cfg *dma_cfg;
36
37 if (!np)
38 return -ENODEV;
39
40 *mac = of_get_mac_address(np);
41 plat->interface = of_get_phy_mode(np);
42
43 plat->bus_id = of_alias_get_id(np, "ethernet");
44 if (plat->bus_id < 0)
45 plat->bus_id = 0;
46
47 plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
48 sizeof(*plat->mdio_bus_data),
49 GFP_KERNEL);
50
51 dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
52 if (!dma_cfg)
53 return -ENOMEM;
54
55 plat->dma_cfg = dma_cfg;
56 of_property_read_u32(np, "samsung,pbl", &dma_cfg->pbl);
57 if (of_property_read_u32(np, "samsung,burst-map", &dma_cfg->burst_map) == 0)
58 dma_cfg->fixed_burst = true;
59
60 return 0;
61}
62#else
63static int sxgbe_probe_config_dt(struct platform_device *pdev,
64 struct sxgbe_plat_data *plat,
65 const char **mac)
66{
67 return -ENOSYS;
68}
69#endif /* CONFIG_OF */
70
71/**
72 * sxgbe_platform_probe
73 * @pdev: platform device pointer
74 * Description: platform_device probe function. It allocates
75 * the necessary resources and invokes the main to init
76 * the net device, register the mdio bus etc.
77 */
78static int sxgbe_platform_probe(struct platform_device *pdev)
79{
80 int ret;
81 int i, chan;
82 struct resource *res;
83 struct device *dev = &pdev->dev;
84 void __iomem *addr;
85 struct sxgbe_priv_data *priv = NULL;
86 struct sxgbe_plat_data *plat_dat = NULL;
87 const char *mac = NULL;
88	struct device_node *node = dev->of_node;
89
90
91 /* Get memory resource */
92 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
93 if (!res)
94 goto err_out;
95
96 addr = devm_ioremap_resource(dev, res);
97 if (IS_ERR(addr))
98 return PTR_ERR(addr);
99
100 if (pdev->dev.of_node) {
101 plat_dat = devm_kzalloc(&pdev->dev,
102 sizeof(struct sxgbe_plat_data),
103 GFP_KERNEL);
104 if (!plat_dat)
105 return -ENOMEM;
106
107 ret = sxgbe_probe_config_dt(pdev, plat_dat, &mac);
108 if (ret) {
109 pr_err("%s: main dt probe failed\n", __func__);
110 return ret;
111 }
112 }
113
114	priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr);
115	if (!priv) {
116		pr_err("%s: main driver probe failed\n", __func__);
117		goto err_out;
118	}
119
120	/* Get MAC address if available (DT) */
121	if (mac)
122		ether_addr_copy(priv->dev->dev_addr, mac);
123
124 /* Get the SXGBE common INT information */
125 priv->irq = irq_of_parse_and_map(node, 0);
126 if (priv->irq <= 0) {
127 dev_err(dev, "sxgbe common irq parsing failed\n");
128 goto err_drv_remove;
129 }
130
131 /* Get the TX/RX IRQ numbers */
132 for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
133 priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
134 if (priv->txq[i]->irq_no <= 0) {
135 dev_err(dev, "sxgbe tx irq parsing failed\n");
136 goto err_tx_irq_unmap;
137 }
138 }
139
140 for (i = 0; i < SXGBE_RX_QUEUES; i++) {
141 priv->rxq[i]->irq_no = irq_of_parse_and_map(node, chan++);
142 if (priv->rxq[i]->irq_no <= 0) {
143 dev_err(dev, "sxgbe rx irq parsing failed\n");
144 goto err_rx_irq_unmap;
145 }
146 }
147
148 platform_set_drvdata(pdev, priv->dev);
149
150 pr_debug("platform driver registration completed\n");
151
152 return 0;
153
154err_rx_irq_unmap:
155	while (i--)
156 irq_dispose_mapping(priv->rxq[i]->irq_no);
157 i = SXGBE_TX_QUEUES;
158err_tx_irq_unmap:
159	while (i--)
160 irq_dispose_mapping(priv->txq[i]->irq_no);
161 irq_dispose_mapping(priv->irq);
162err_drv_remove:
163	sxgbe_drv_remove(priv->dev);
164err_out:
165 return -ENODEV;
166}
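
The probe above consumes interrupts strictly in order: index 0 is the common IRQ, then one per TX queue, then one per RX queue. A sketch of the index math (not part of the patch; sxgbe_irq_index() is hypothetical and SXGBE_TX_QUEUES comes from sxgbe_common.h):

	static int sxgbe_irq_index(bool is_rx, int chan)
	{
		/* DT order assumed: common IRQ, TX channels, then RX channels */
		return 1 + (is_rx ? SXGBE_TX_QUEUES : 0) + chan;
	}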
167
168/**
169 * sxgbe_platform_remove
170 * @pdev: platform device pointer
171 * Description: this function calls the main to free the net resources
172 * and calls the platform's hooks to release the resources (e.g. mem).
173 */
174static int sxgbe_platform_remove(struct platform_device *pdev)
175{
176 struct net_device *ndev = platform_get_drvdata(pdev);
177 int ret = sxgbe_drv_remove(ndev);
178
179 return ret;
180}
181
182#ifdef CONFIG_PM
183static int sxgbe_platform_suspend(struct device *dev)
184{
185 struct net_device *ndev = dev_get_drvdata(dev);
186
187 return sxgbe_suspend(ndev);
188}
189
190static int sxgbe_platform_resume(struct device *dev)
191{
192 struct net_device *ndev = dev_get_drvdata(dev);
193
194 return sxgbe_resume(ndev);
195}
196
197static int sxgbe_platform_freeze(struct device *dev)
198{
199 struct net_device *ndev = dev_get_drvdata(dev);
200
201 return sxgbe_freeze(ndev);
202}
203
204static int sxgbe_platform_restore(struct device *dev)
205{
206 struct net_device *ndev = dev_get_drvdata(dev);
207
208 return sxgbe_restore(ndev);
209}
210
211static const struct dev_pm_ops sxgbe_platform_pm_ops = {
212 .suspend = sxgbe_platform_suspend,
213 .resume = sxgbe_platform_resume,
214 .freeze = sxgbe_platform_freeze,
215 .thaw = sxgbe_platform_restore,
216 .restore = sxgbe_platform_restore,
217};
218#else
219static const struct dev_pm_ops sxgbe_platform_pm_ops;
220#endif /* CONFIG_PM */
221
222static const struct of_device_id sxgbe_dt_ids[] = {
223 { .compatible = "samsung,sxgbe-v2.0a"},
224 { /* sentinel */ }
225};
226MODULE_DEVICE_TABLE(of, sxgbe_dt_ids);
227
228struct platform_driver sxgbe_platform_driver = {
229 .probe = sxgbe_platform_probe,
230 .remove = sxgbe_platform_remove,
231 .driver = {
232 .name = SXGBE_RESOURCE_NAME,
233 .owner = THIS_MODULE,
234 .pm = &sxgbe_platform_pm_ops,
235 .of_match_table = of_match_ptr(sxgbe_dt_ids),
236 },
237};
238
239int sxgbe_register_platform(void)
240{
241 int err;
242
243 err = platform_driver_register(&sxgbe_platform_driver);
244 if (err)
245 pr_err("failed to register the platform driver\n");
246
247 return err;
248}
249
250void sxgbe_unregister_platform(void)
251{
252 platform_driver_unregister(&sxgbe_platform_driver);
253}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
new file mode 100644
index 000000000000..d1cd9ac1b062
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -0,0 +1,477 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_REGMAP_H__
13#define __SXGBE_REGMAP_H__
14
15/* SXGBE MAC Registers */
16#define SXGBE_CORE_TX_CONFIG_REG 0x0000
17#define SXGBE_CORE_RX_CONFIG_REG 0x0004
18#define SXGBE_CORE_PKT_FILTER_REG 0x0008
19#define SXGBE_CORE_WATCHDOG_TIMEOUT_REG 0x000C
20#define SXGBE_CORE_HASH_TABLE_REG0 0x0010
21#define SXGBE_CORE_HASH_TABLE_REG1 0x0014
22#define SXGBE_CORE_HASH_TABLE_REG2 0x0018
23#define SXGBE_CORE_HASH_TABLE_REG3 0x001C
24#define SXGBE_CORE_HASH_TABLE_REG4 0x0020
25#define SXGBE_CORE_HASH_TABLE_REG5 0x0024
26#define SXGBE_CORE_HASH_TABLE_REG6 0x0028
27#define SXGBE_CORE_HASH_TABLE_REG7 0x002C
28/* VLAN Specific Registers */
29#define SXGBE_CORE_VLAN_TAG_REG 0x0050
30#define SXGBE_CORE_VLAN_HASHTAB_REG 0x0058
31#define SXGBE_CORE_VLAN_INSCTL_REG 0x0060
32#define SXGBE_CORE_VLAN_INNERCTL_REG 0x0064
33#define SXGBE_CORE_RX_ETHTYPE_MATCH_REG 0x006C
34
35/* Flow Control Registers */
36#define SXGBE_CORE_TX_Q0_FLOWCTL_REG 0x0070
37#define SXGBE_CORE_TX_Q1_FLOWCTL_REG 0x0074
38#define SXGBE_CORE_TX_Q2_FLOWCTL_REG 0x0078
39#define SXGBE_CORE_TX_Q3_FLOWCTL_REG 0x007C
40#define SXGBE_CORE_TX_Q4_FLOWCTL_REG 0x0080
41#define SXGBE_CORE_TX_Q5_FLOWCTL_REG 0x0084
42#define SXGBE_CORE_TX_Q6_FLOWCTL_REG 0x0088
43#define SXGBE_CORE_TX_Q7_FLOWCTL_REG 0x008C
44#define SXGBE_CORE_RX_FLOWCTL_REG 0x0090
45#define SXGBE_CORE_RX_CTL0_REG 0x00A0
46#define SXGBE_CORE_RX_CTL1_REG 0x00A4
47#define SXGBE_CORE_RX_CTL2_REG 0x00A8
48#define SXGBE_CORE_RX_CTL3_REG 0x00AC
49
50/* Interrupt Registers */
51#define SXGBE_CORE_INT_STATUS_REG 0x00B0
52#define SXGBE_CORE_INT_ENABLE_REG 0x00B4
53#define SXGBE_CORE_RXTX_ERR_STATUS_REG 0x00B8
54#define SXGBE_CORE_PMT_CTL_STATUS_REG 0x00C0
55#define SXGBE_CORE_RWK_PKT_FILTER_REG 0x00C4
56#define SXGBE_CORE_VERSION_REG 0x0110
57#define SXGBE_CORE_DEBUG_REG 0x0114
58#define SXGBE_CORE_HW_FEA_REG(index) (0x011C + index * 4)
59
60/* SMA(MDIO) module registers */
61#define SXGBE_MDIO_SCMD_ADD_REG 0x0200
62#define SXGBE_MDIO_SCMD_DATA_REG 0x0204
63#define SXGBE_MDIO_CCMD_WADD_REG 0x0208
64#define SXGBE_MDIO_CCMD_WDATA_REG 0x020C
65#define SXGBE_MDIO_CSCAN_PORT_REG 0x0210
66#define SXGBE_MDIO_INT_STATUS_REG 0x0214
67#define SXGBE_MDIO_INT_ENABLE_REG 0x0218
68#define SXGBE_MDIO_PORT_CONDCON_REG 0x021C
69#define SXGBE_MDIO_CLAUSE22_PORT_REG 0x0220
70
71/* port specific, addr = 0-3 */
72#define SXGBE_MDIO_DEV_BASE_REG 0x0230
73#define SXGBE_MDIO_PORT_DEV_REG(addr) \
74 (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x0)
75#define SXGBE_MDIO_PORT_LSTATUS_REG(addr) \
76 (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x4)
77#define SXGBE_MDIO_PORT_ALIVE_REG(addr) \
78 (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x8)
79
80#define SXGBE_CORE_GPIO_CTL_REG 0x0278
81#define SXGBE_CORE_GPIO_STATUS_REG 0x027C
82
83/* Address registers for filtering */
84#define SXGBE_CORE_ADD_BASE_REG 0x0300
85
86/* addr = 0-31 */
87#define SXGBE_CORE_ADD_HIGHOFFSET(addr) \
88 (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x0)
89#define SXGBE_CORE_ADD_LOWOFFSET(addr) \
90 (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x4)
91
92/* SXGBE MMC registers */
93#define SXGBE_MMC_CTL_REG 0x0800
94#define SXGBE_MMC_RXINT_STATUS_REG 0x0804
95#define SXGBE_MMC_TXINT_STATUS_REG 0x0808
96#define SXGBE_MMC_RXINT_ENABLE_REG 0x080C
97#define SXGBE_MMC_TXINT_ENABLE_REG 0x0810
98
99/* TX specific counters */
100#define SXGBE_MMC_TXOCTETHI_GBCNT_REG 0x0814
101#define SXGBE_MMC_TXOCTETLO_GBCNT_REG 0x0818
102#define SXGBE_MMC_TXFRAMELO_GBCNT_REG 0x081C
103#define SXGBE_MMC_TXFRAMEHI_GBCNT_REG 0x0820
104#define SXGBE_MMC_TXBROADLO_GCNT_REG 0x0824
105#define SXGBE_MMC_TXBROADHI_GCNT_REG 0x0828
106#define SXGBE_MMC_TXMULTILO_GCNT_REG 0x082C
107#define SXGBE_MMC_TXMULTIHI_GCNT_REG 0x0830
108#define SXGBE_MMC_TX64LO_GBCNT_REG 0x0834
109#define SXGBE_MMC_TX64HI_GBCNT_REG 0x0838
110#define SXGBE_MMC_TX65TO127LO_GBCNT_REG 0x083C
111#define SXGBE_MMC_TX65TO127HI_GBCNT_REG 0x0840
112#define SXGBE_MMC_TX128TO255LO_GBCNT_REG 0x0844
113#define SXGBE_MMC_TX128TO255HI_GBCNT_REG 0x0848
114#define SXGBE_MMC_TX256TO511LO_GBCNT_REG 0x084C
115#define SXGBE_MMC_TX256TO511HI_GBCNT_REG 0x0850
116#define SXGBE_MMC_TX512TO1023LO_GBCNT_REG 0x0854
117#define SXGBE_MMC_TX512TO1023HI_GBCNT_REG 0x0858
118#define SXGBE_MMC_TX1023TOMAXLO_GBCNT_REG 0x085C
119#define SXGBE_MMC_TX1023TOMAXHI_GBCNT_REG 0x0860
120#define SXGBE_MMC_TXUNICASTLO_GBCNT_REG 0x0864
121#define SXGBE_MMC_TXUNICASTHI_GBCNT_REG 0x0868
122#define SXGBE_MMC_TXMULTILO_GBCNT_REG 0x086C
123#define SXGBE_MMC_TXMULTIHI_GBCNT_REG 0x0870
124#define SXGBE_MMC_TXBROADLO_GBCNT_REG 0x0874
125#define SXGBE_MMC_TXBROADHI_GBCNT_REG 0x0878
126#define SXGBE_MMC_TXUFLWLO_GBCNT_REG 0x087C
127#define SXGBE_MMC_TXUFLWHI_GBCNT_REG 0x0880
128#define SXGBE_MMC_TXOCTETLO_GCNT_REG 0x0884
129#define SXGBE_MMC_TXOCTETHI_GCNT_REG 0x0888
130#define SXGBE_MMC_TXFRAMELO_GCNT_REG 0x088C
131#define SXGBE_MMC_TXFRAMEHI_GCNT_REG 0x0890
132#define SXGBE_MMC_TXPAUSELO_CNT_REG 0x0894
133#define SXGBE_MMC_TXPAUSEHI_CNT_REG 0x0898
134#define SXGBE_MMC_TXVLANLO_GCNT_REG 0x089C
135#define SXGBE_MMC_TXVLANHI_GCNT_REG 0x08A0
136
137/* RX specific counters */
138#define SXGBE_MMC_RXFRAMELO_GBCNT_REG 0x0900
139#define SXGBE_MMC_RXFRAMEHI_GBCNT_REG 0x0904
140#define SXGBE_MMC_RXOCTETLO_GBCNT_REG 0x0908
141#define SXGBE_MMC_RXOCTETHI_GBCNT_REG 0x090C
142#define SXGBE_MMC_RXOCTETLO_GCNT_REG 0x0910
143#define SXGBE_MMC_RXOCTETHI_GCNT_REG 0x0914
144#define SXGBE_MMC_RXBROADLO_GCNT_REG 0x0918
145#define SXGBE_MMC_RXBROADHI_GCNT_REG 0x091C
146#define SXGBE_MMC_RXMULTILO_GCNT_REG 0x0920
147#define SXGBE_MMC_RXMULTIHI_GCNT_REG 0x0924
148#define SXGBE_MMC_RXCRCERRLO_REG 0x0928
149#define SXGBE_MMC_RXCRCERRHI_REG 0x092C
150#define SXGBE_MMC_RXSHORT64BFRAME_ERR_REG 0x0930
151#define SXGBE_MMC_RXJABBERERR_REG 0x0934
152#define SXGBE_MMC_RXSHORT64BFRAME_COR_REG 0x0938
153#define SXGBE_MMC_RXOVERMAXFRAME_COR_REG 0x093C
154#define SXGBE_MMC_RX64LO_GBCNT_REG 0x0940
155#define SXGBE_MMC_RX64HI_GBCNT_REG 0x0944
156#define SXGBE_MMC_RX65TO127LO_GBCNT_REG 0x0948
157#define SXGBE_MMC_RX65TO127HI_GBCNT_REG 0x094C
158#define SXGBE_MMC_RX128TO255LO_GBCNT_REG 0x0950
159#define SXGBE_MMC_RX128TO255HI_GBCNT_REG 0x0954
160#define SXGBE_MMC_RX256TO511LO_GBCNT_REG 0x0958
161#define SXGBE_MMC_RX256TO511HI_GBCNT_REG 0x095C
162#define SXGBE_MMC_RX512TO1023LO_GBCNT_REG 0x0960
163#define SXGBE_MMC_RX512TO1023HI_GBCNT_REG 0x0964
164#define SXGBE_MMC_RX1023TOMAXLO_GBCNT_REG 0x0968
165#define SXGBE_MMC_RX1023TOMAXHI_GBCNT_REG 0x096C
166#define SXGBE_MMC_RXUNICASTLO_GCNT_REG 0x0970
167#define SXGBE_MMC_RXUNICASTHI_GCNT_REG 0x0974
168#define SXGBE_MMC_RXLENERRLO_REG 0x0978
169#define SXGBE_MMC_RXLENERRHI_REG 0x097C
170#define SXGBE_MMC_RXOUTOFRANGETYPELO_REG 0x0980
171#define SXGBE_MMC_RXOUTOFRANGETYPEHI_REG 0x0984
172#define SXGBE_MMC_RXPAUSELO_CNT_REG 0x0988
173#define SXGBE_MMC_RXPAUSEHI_CNT_REG 0x098C
174#define SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG 0x0990
175#define SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG 0x0994
176#define SXGBE_MMC_RXVLANLO_GBCNT_REG 0x0998
177#define SXGBE_MMC_RXVLANHI_GBCNT_REG 0x099C
178#define SXGBE_MMC_RXWATCHDOG_ERR_REG 0x09A0
179
180/* L3/L4 function registers */
181#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
182
183#define SXGBE_CORE_L34_DATA_REG 0x0C04
184
185/* ARP registers */
186#define SXGBE_CORE_ARP_ADD_REG 0x0C10
187
188/* RSS registers */
189#define SXGBE_CORE_RSS_CTL_REG 0x0C80
190#define SXGBE_CORE_RSS_ADD_REG 0x0C88
191#define SXGBE_CORE_RSS_DATA_REG 0x0C8C
192
193/* IEEE 1588 registers */
194#define SXGBE_CORE_TSTAMP_CTL_REG 0x0D00
195#define SXGBE_CORE_SUBSEC_INC_REG 0x0D04
196#define SXGBE_CORE_SYSTIME_SEC_REG 0x0D0C
197#define SXGBE_CORE_SYSTIME_NSEC_REG 0x0D10
198#define SXGBE_CORE_SYSTIME_SECUP_REG 0x0D14
199#define SXGBE_CORE_TSTAMP_ADD_REG 0x0D18
200#define SXGBE_CORE_SYSTIME_HWORD_REG 0x0D1C
201#define SXGBE_CORE_TSTAMP_STATUS_REG 0x0D20
202#define SXGBE_CORE_TXTIME_STATUSNSEC_REG 0x0D30
203#define SXGBE_CORE_TXTIME_STATUSSEC_REG 0x0D34
204
205/* Auxiliary registers */
206#define SXGBE_CORE_AUX_CTL_REG 0x0D40
207#define SXGBE_CORE_AUX_TSTAMP_NSEC_REG 0x0D48
208#define SXGBE_CORE_AUX_TSTAMP_SEC_REG 0x0D4C
209#define SXGBE_CORE_AUX_TSTAMP_INGCOR_REG 0x0D50
210#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_REG 0x0D54
211#define SXGBE_CORE_AUX_TSTAMP_INGCOR_NSEC_REG 0x0D58
212#define SXGBE_CORE_AUX_TSTAMP_INGCOR_SUBNSEC_REG 0x0D5C
213#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_NSEC_REG 0x0D60
214#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_SUBNSEC_REG 0x0D64
215
216/* PPS registers */
217#define SXGBE_CORE_PPS_CTL_REG 0x0D70
218#define SXGBE_CORE_PPS_BASE 0x0D80
219
220/* addr = 0 - 3 */
221#define SXGBE_CORE_PPS_TTIME_SEC_REG(addr) \
222 (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x0)
223#define SXGBE_CORE_PPS_TTIME_NSEC_REG(addr) \
224 (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x4)
225#define SXGBE_CORE_PPS_INTERVAL_REG(addr) \
226 (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x8)
227#define SXGBE_CORE_PPS_WIDTH_REG(addr) \
228 (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0xC)
229#define SXGBE_CORE_PTO_CTL_REG 0x0DC0
230#define SXGBE_CORE_SRCPORT_ITY0_REG 0x0DC4
231#define SXGBE_CORE_SRCPORT_ITY1_REG 0x0DC8
232#define SXGBE_CORE_SRCPORT_ITY2_REG 0x0DCC
233#define SXGBE_CORE_LOGMSG_LEVEL_REG 0x0DD0
234
235/* SXGBE MTL Registers */
236#define SXGBE_MTL_BASE_REG 0x1000
237#define SXGBE_MTL_OP_MODE_REG (SXGBE_MTL_BASE_REG + 0x0000)
238#define SXGBE_MTL_DEBUG_CTL_REG (SXGBE_MTL_BASE_REG + 0x0008)
239#define SXGBE_MTL_DEBUG_STATUS_REG (SXGBE_MTL_BASE_REG + 0x000C)
240#define SXGBE_MTL_FIFO_DEBUGDATA_REG (SXGBE_MTL_BASE_REG + 0x0010)
241#define SXGBE_MTL_INT_STATUS_REG (SXGBE_MTL_BASE_REG + 0x0020)
242#define SXGBE_MTL_RXQ_DMAMAP0_REG (SXGBE_MTL_BASE_REG + 0x0030)
243#define SXGBE_MTL_RXQ_DMAMAP1_REG (SXGBE_MTL_BASE_REG + 0x0034)
244#define SXGBE_MTL_RXQ_DMAMAP2_REG (SXGBE_MTL_BASE_REG + 0x0038)
245#define SXGBE_MTL_TX_PRTYMAP0_REG (SXGBE_MTL_BASE_REG + 0x0040)
246#define SXGBE_MTL_TX_PRTYMAP1_REG (SXGBE_MTL_BASE_REG + 0x0044)
247
248/* TC/Queue registers, qnum=0-15 */
249#define SXGBE_MTL_TC_TXBASE_REG (SXGBE_MTL_BASE_REG + 0x0100)
250#define SXGBE_MTL_TXQ_OPMODE_REG(qnum) \
251 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x00)
252#define SXGBE_MTL_SFMODE BIT(1)
253#define SXGBE_MTL_FIFO_LSHIFT 16
254#define SXGBE_MTL_ENABLE_QUEUE 0x00000008
255#define SXGBE_MTL_TXQ_UNDERFLOW_REG(qnum) \
256 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x04)
257#define SXGBE_MTL_TXQ_DEBUG_REG(qnum) \
258 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x08)
259#define SXGBE_MTL_TXQ_ETSCTL_REG(qnum) \
260 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x10)
261#define SXGBE_MTL_TXQ_ETSSTATUS_REG(qnum) \
262 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x14)
263#define SXGBE_MTL_TXQ_QUANTWEIGHT_REG(qnum) \
264 (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x18)
265
266#define SXGBE_MTL_TC_RXBASE_REG 0x1140
267#define SXGBE_RX_MTL_SFMODE BIT(5)
268#define SXGBE_MTL_RXQ_OPMODE_REG(qnum) \
269 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x00)
270#define SXGBE_MTL_RXQ_MISPKTOVERFLOW_REG(qnum) \
271 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x04)
272#define SXGBE_MTL_RXQ_DEBUG_REG(qnum) \
273 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x08)
274#define SXGBE_MTL_RXQ_CTL_REG(qnum) \
275 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x0C)
276#define SXGBE_MTL_RXQ_INTENABLE_REG(qnum) \
277 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x30)
278#define SXGBE_MTL_RXQ_INTSTATUS_REG(qnum) \
279 (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x34)
280
281/* SXGBE DMA Registers */
282#define SXGBE_DMA_BASE_REG 0x3000
283#define SXGBE_DMA_MODE_REG (SXGBE_DMA_BASE_REG + 0x0000)
284#define SXGBE_DMA_SOFT_RESET BIT(0)
285#define SXGBE_DMA_SYSBUS_MODE_REG (SXGBE_DMA_BASE_REG + 0x0004)
286#define SXGBE_DMA_AXI_UNDEF_BURST BIT(0)
287#define SXGBE_DMA_ENHACE_ADDR_MODE BIT(11)
288#define SXGBE_DMA_INT_STATUS_REG (SXGBE_DMA_BASE_REG + 0x0008)
289#define SXGBE_DMA_AXI_ARCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0010)
290#define SXGBE_DMA_AXI_AWCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0018)
291#define SXGBE_DMA_DEBUG_STATUS0_REG (SXGBE_DMA_BASE_REG + 0x0020)
292#define SXGBE_DMA_DEBUG_STATUS1_REG (SXGBE_DMA_BASE_REG + 0x0024)
293#define SXGBE_DMA_DEBUG_STATUS2_REG (SXGBE_DMA_BASE_REG + 0x0028)
294#define SXGBE_DMA_DEBUG_STATUS3_REG (SXGBE_DMA_BASE_REG + 0x002C)
295#define SXGBE_DMA_DEBUG_STATUS4_REG (SXGBE_DMA_BASE_REG + 0x0030)
296#define SXGBE_DMA_DEBUG_STATUS5_REG (SXGBE_DMA_BASE_REG + 0x0034)
297
298/* Channel Registers, cha_num = 0-15 */
299#define SXGBE_DMA_CHA_BASE_REG \
300 (SXGBE_DMA_BASE_REG + 0x0100)
301#define SXGBE_DMA_CHA_CTL_REG(cha_num) \
302 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x00)
303#define SXGBE_DMA_PBL_X8MODE BIT(16)
304#define SXGBE_DMA_CHA_TXCTL_TSE_ENABLE BIT(12)
305#define SXGBE_DMA_CHA_TXCTL_REG(cha_num) \
306 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x04)
307#define SXGBE_DMA_CHA_RXCTL_REG(cha_num) \
308 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x08)
309#define SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num) \
310 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x10)
311#define SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num) \
312 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x14)
313#define SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num) \
314 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x18)
315#define SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num) \
316 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x1C)
317#define SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num) \
318 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x24)
319#define SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num) \
320 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x2C)
321#define SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num) \
322 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x30)
323#define SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num) \
324 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x34)
325#define SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num) \
326 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x38)
327#define SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(cha_num) \
328 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x3C)
329#define SXGBE_DMA_CHA_TXDESC_CURADDLO_REG(cha_num) \
330 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x44)
331#define SXGBE_DMA_CHA_RXDESC_CURADDLO_REG(cha_num) \
332 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x4C)
333#define SXGBE_DMA_CHA_CURTXBUF_ADDHI_REG(cha_num) \
334 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x50)
335#define SXGBE_DMA_CHA_CURTXBUF_ADDLO_REG(cha_num) \
336 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x54)
337#define SXGBE_DMA_CHA_CURRXBUF_ADDHI_REG(cha_num) \
338 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x58)
339#define SXGBE_DMA_CHA_CURRXBUF_ADDLO_REG(cha_num) \
340 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x5C)
341#define SXGBE_DMA_CHA_STATUS_REG(cha_num) \
342 (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x60)
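
Each DMA channel occupies a 0x80-byte register window starting at 0x3100, as the macros above encode. A sketch of the address arithmetic (not part of the patch):

	static unsigned int dma_cha_ctl_offset(unsigned int cha_num)
	{
		/* e.g. channel 2: 0x3000 + 0x100 + 2 * 0x80 = 0x3200 */
		return 0x3000 + 0x0100 + cha_num * 0x80;
	}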
343
344/* TX DMA control register specific */
345#define SXGBE_TX_START_DMA BIT(0)
346
347/* sxgbe tx configuration register bitfields */
348#define SXGBE_SPEED_10G 0x0
349#define SXGBE_SPEED_2_5G 0x1
350#define SXGBE_SPEED_1G 0x2
351#define SXGBE_SPEED_LSHIFT 29
352
353#define SXGBE_TX_ENABLE BIT(0)
354#define SXGBE_TX_DISDIC_ALGO BIT(1)
355#define SXGBE_TX_JABBER_DISABLE BIT(16)
356
357/* sxgbe rx configuration register bitfields */
358#define SXGBE_RX_ENABLE BIT(0)
359#define SXGBE_RX_ACS_ENABLE BIT(1)
360#define SXGBE_RX_WATCHDOG_DISABLE BIT(7)
361#define SXGBE_RX_JUMBPKT_ENABLE BIT(8)
362#define SXGBE_RX_CSUMOFFLOAD_ENABLE BIT(9)
363#define SXGBE_RX_LOOPBACK_ENABLE BIT(10)
364#define SXGBE_RX_ARPOFFLOAD_ENABLE BIT(31)
365
366/* sxgbe vlan Tag Register bitfields */
367#define SXGBE_VLAN_SVLAN_ENABLE BIT(18)
368#define SXGBE_VLAN_DOUBLEVLAN_ENABLE BIT(26)
369#define SXGBE_VLAN_INNERVLAN_ENABLE BIT(27)
370
371/* XMAC VLAN Tag Inclusion Register(0x0060) bitfields
372 * Below fields same for Inner VLAN Tag Inclusion
373 * Register(0x0064) register
374 */
375enum vlan_tag_ctl_tx {
376 VLAN_TAG_TX_NOP,
377 VLAN_TAG_TX_DEL,
378 VLAN_TAG_TX_INSERT,
379 VLAN_TAG_TX_REPLACE
380};
381#define SXGBE_VLAN_PRTY_CTL BIT(18)
382#define SXGBE_VLAN_CSVL_CTL BIT(19)
383
384/* SXGBE TX Q Flow Control Register bitfields */
385#define SXGBE_TX_FLOW_CTL_FCB BIT(0)
386#define SXGBE_TX_FLOW_CTL_TFB BIT(1)
387
388/* SXGBE RX Q Flow Control Register bitfields */
389#define SXGBE_RX_FLOW_CTL_ENABLE BIT(0)
390#define SXGBE_RX_UNICAST_DETECT BIT(1)
391#define SXGBE_RX_PRTYFLOW_CTL_ENABLE BIT(8)
392
393/* sxgbe rx Q control0 register bitfields */
394#define SXGBE_RX_Q_ENABLE 0x2
395
396/* SXGBE hardware features bitfield specific */
397/* Capability Register 0 */
398#define SXGBE_HW_FEAT_GMII(cap) ((cap & 0x00000002) >> 1)
399#define SXGBE_HW_FEAT_VLAN_HASH_FILTER(cap) ((cap & 0x00000010) >> 4)
400#define SXGBE_HW_FEAT_SMA(cap) ((cap & 0x00000020) >> 5)
401#define SXGBE_HW_FEAT_PMT_TEMOTE_WOP(cap) ((cap & 0x00000040) >> 6)
402#define SXGBE_HW_FEAT_PMT_MAGIC_PKT(cap) ((cap & 0x00000080) >> 7)
403#define SXGBE_HW_FEAT_RMON(cap) ((cap & 0x00000100) >> 8)
404#define SXGBE_HW_FEAT_ARP_OFFLOAD(cap) ((cap & 0x00000200) >> 9)
405#define SXGBE_HW_FEAT_IEEE1500_2008(cap) ((cap & 0x00001000) >> 12)
406#define SXGBE_HW_FEAT_EEE(cap) ((cap & 0x00002000) >> 13)
407#define SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(cap) ((cap & 0x00004000) >> 14)
408#define SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(cap) ((cap & 0x00010000) >> 16)
409#define SXGBE_HW_FEAT_MACADDR_COUNT(cap) ((cap & 0x007C0000) >> 18)
410#define SXGBE_HW_FEAT_TSTMAP_SRC(cap) ((cap & 0x06000000) >> 25)
411#define SXGBE_HW_FEAT_SRCADDR_VLAN(cap) ((cap & 0x08000000) >> 27)
412
413/* Capability Register 1 */
414#define SXGBE_HW_FEAT_RX_FIFO_SIZE(cap) ((cap & 0x0000001F))
415#define SXGBE_HW_FEAT_TX_FIFO_SIZE(cap) ((cap & 0x000007C0) >> 6)
416#define SXGBE_HW_FEAT_IEEE1588_HWORD(cap) ((cap & 0x00002000) >> 13)
417#define SXGBE_HW_FEAT_DCB(cap) ((cap & 0x00010000) >> 16)
418#define SXGBE_HW_FEAT_SPLIT_HDR(cap) ((cap & 0x00020000) >> 17)
419#define SXGBE_HW_FEAT_TSO(cap) ((cap & 0x00040000) >> 18)
420#define SXGBE_HW_FEAT_DEBUG_MEM_IFACE(cap) ((cap & 0x00080000) >> 19)
421#define SXGBE_HW_FEAT_RSS(cap) ((cap & 0x00100000) >> 20)
422#define SXGBE_HW_FEAT_HASH_TABLE_SIZE(cap) ((cap & 0x03000000) >> 24)
423#define SXGBE_HW_FEAT_L3L4_FILTER_NUM(cap) ((cap & 0x78000000) >> 27)
424
425/* Capability Register 2 */
426#define SXGBE_HW_FEAT_RX_MTL_QUEUES(cap) ((cap & 0x0000000F))
427#define SXGBE_HW_FEAT_TX_MTL_QUEUES(cap) ((cap & 0x000003C0) >> 6)
428#define SXGBE_HW_FEAT_RX_DMA_CHANNELS(cap) ((cap & 0x0000F000) >> 12)
429#define SXGBE_HW_FEAT_TX_DMA_CHANNELS(cap) ((cap & 0x003C0000) >> 18)
430#define SXGBE_HW_FEAT_PPS_OUTPUTS(cap) ((cap & 0x07000000) >> 24)
431#define SXGBE_HW_FEAT_AUX_SNAPSHOTS(cap) ((cap & 0x70000000) >> 28)
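
Usage sketch for the capability macros above (not part of the patch; assumes the sxgbe_reg.h definitions and a mapped register base):

	static void example_read_caps(void __iomem *ioaddr)
	{
		u32 cap2 = readl(ioaddr + SXGBE_CORE_HW_FEA_REG(2));
		unsigned int rx_mtl_q  = SXGBE_HW_FEAT_RX_MTL_QUEUES(cap2);
		unsigned int tx_dma_ch = SXGBE_HW_FEAT_TX_DMA_CHANNELS(cap2);

		pr_info("rx mtl queues: %u, tx dma channels: %u\n",
			rx_mtl_q, tx_dma_ch);
	}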
432
433/* DMA channel interrupt enable specific */
434/* DMA Normal interrupt */
435#define SXGBE_DMA_INT_ENA_NIE BIT(16) /* Normal Summary */
436#define SXGBE_DMA_INT_ENA_TIE BIT(0) /* Transmit Interrupt */
437#define SXGBE_DMA_INT_ENA_TUE BIT(2) /* Transmit Buffer Unavailable */
438#define SXGBE_DMA_INT_ENA_RIE BIT(6) /* Receive Interrupt */
439
440#define SXGBE_DMA_INT_NORMAL \
441 (SXGBE_DMA_INT_ENA_NIE | SXGBE_DMA_INT_ENA_RIE | \
442 SXGBE_DMA_INT_ENA_TIE | SXGBE_DMA_INT_ENA_TUE)
443
444/* DMA Abnormal interrupt */
445#define SXGBE_DMA_INT_ENA_AIE BIT(15) /* Abnormal Summary */
446#define SXGBE_DMA_INT_ENA_TSE BIT(1) /* Transmit Stopped */
447#define SXGBE_DMA_INT_ENA_RUE BIT(7) /* Receive Buffer Unavailable */
448#define SXGBE_DMA_INT_ENA_RSE BIT(8) /* Receive Stopped */
449#define SXGBE_DMA_INT_ENA_FBE BIT(12) /* Fatal Bus Error */
450#define SXGBE_DMA_INT_ENA_CDEE BIT(13) /* Context Descriptor Error */
451
452#define SXGBE_DMA_INT_ABNORMAL \
453 (SXGBE_DMA_INT_ENA_AIE | SXGBE_DMA_INT_ENA_TSE | \
454 SXGBE_DMA_INT_ENA_RUE | SXGBE_DMA_INT_ENA_RSE | \
455 SXGBE_DMA_INT_ENA_FBE | SXGBE_DMA_INT_ENA_CDEE)
456
457#define SXGBE_DMA_ENA_INT (SXGBE_DMA_INT_NORMAL | SXGBE_DMA_INT_ABNORMAL)
458
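SXGBE_DMA_ENA_INT ORs the normal and abnormal groups, together with their summary bits (NIE/AIE), into one write-ready mask. A hedged sketch of arming it on a per-channel interrupt-enable register is below; the register offset and channel stride are made-up placeholders, since the real DMA channel offsets are defined elsewhere in this header.

/* Sketch only: enable all normal + abnormal DMA interrupts on one
 * channel. SKETCH_DMA_CHAN_INT_EN is a hypothetical offset, not a
 * define from this patch.
 */
#define SKETCH_DMA_CHAN_INT_EN(chan)	(0x3080 + (chan) * 0x80)

static void sxgbe_enable_dma_irq_sketch(void __iomem *ioaddr, int chan)
{
	writel(SXGBE_DMA_ENA_INT, ioaddr + SKETCH_DMA_CHAN_INT_EN(chan));
}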
459/* DMA channel interrupt status specific */
460#define SXGBE_DMA_INT_STATUS_REB2 BIT(21)
461#define SXGBE_DMA_INT_STATUS_REB1 BIT(20)
462#define SXGBE_DMA_INT_STATUS_REB0 BIT(19)
463#define SXGBE_DMA_INT_STATUS_TEB2 BIT(18)
464#define SXGBE_DMA_INT_STATUS_TEB1 BIT(17)
465#define SXGBE_DMA_INT_STATUS_TEB0 BIT(16)
466#define SXGBE_DMA_INT_STATUS_NIS BIT(15)
467#define SXGBE_DMA_INT_STATUS_AIS BIT(14)
468#define SXGBE_DMA_INT_STATUS_CTXTERR BIT(13)
469#define SXGBE_DMA_INT_STATUS_FBE BIT(12)
470#define SXGBE_DMA_INT_STATUS_RPS BIT(8)
471#define SXGBE_DMA_INT_STATUS_RBU BIT(7)
472#define SXGBE_DMA_INT_STATUS_RI BIT(6)
473#define SXGBE_DMA_INT_STATUS_TBU BIT(2)
474#define SXGBE_DMA_INT_STATUS_TPS BIT(1)
475#define SXGBE_DMA_INT_STATUS_TI BIT(0)
476
477#endif /* __SXGBE_REGMAP_H__ */
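Since NIS and AIS are summary flags for the normal and abnormal groups above, an interrupt handler can test the summaries first and only then the individual causes. A sketch, with the status-register read left abstract; the return codes and helper name are illustrative assumptions.

/* Sketch only: classify a raw per-channel DMA interrupt status word.
 * A real handler would schedule NAPI, refill rings, or restart the
 * channel as appropriate.
 */
static int sxgbe_classify_dma_irq_sketch(u32 status)
{
	if (status & SXGBE_DMA_INT_STATUS_NIS) {
		if (status & SXGBE_DMA_INT_STATUS_TI)
			return 1;	/* frame(s) transmitted */
		if (status & SXGBE_DMA_INT_STATUS_RI)
			return 2;	/* frame(s) received */
	}
	if (status & SXGBE_DMA_INT_STATUS_AIS) {
		if (status & SXGBE_DMA_INT_STATUS_FBE)
			return -1;	/* fatal bus error */
		if (status & SXGBE_DMA_INT_STATUS_RBU)
			return -2;	/* RX buffer unavailable */
	}
	return 0;			/* nothing of interest */
}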
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
new file mode 100644
index 000000000000..51c32194ba88
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
@@ -0,0 +1,91 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#include <linux/bitops.h>
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/phy.h>
16#include "sxgbe_common.h"
17#include "sxgbe_xpcs.h"
18
19static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg)
20{
21 u32 value;
22 struct sxgbe_priv_data *priv = netdev_priv(ndev);
23
24 value = readl(priv->ioaddr + XPCS_OFFSET + reg);
25
26 return value;
27}
28
29static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data)
30{
31 struct sxgbe_priv_data *priv = netdev_priv(ndev);
32
33 writel(data, priv->ioaddr + XPCS_OFFSET + reg);
34
35 return 0;
36}
37
38int sxgbe_xpcs_init(struct net_device *ndev)
39{
40 u32 value;
41
42 value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
43 /* 10G XAUI mode */
44 sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
45 sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
46 sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13));
47 sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
48
49 do {
50 value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
51 } while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE);
52
53 value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
54 sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
55
56 do {
57 value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
58 } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
59
60 return 0;
61}
62
63int sxgbe_xpcs_init_1G(struct net_device *ndev)
64{
65 int value;
66
67 /* 10GBASE-X PCS (1G) mode */
68 sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
69 sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
70 value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
71 sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13));
72
73 value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
74 sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6));
75 sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13));
76 value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
77 sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
78
79 do {
80 value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
81 } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
82
83 value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
84 sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
85
86	/* Auto Negotiation clause 37 enable */
87 value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
88 sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12));
89
90 return 0;
91}
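Note that both busy-wait loops in sxgbe_xpcs_init poll VR_PCS_MMD_DIGITAL_STATUS with no upper bound, so a PHY that never reaches the expected sequencer state would hang the caller. A bounded variant is sketched below as a possible hardening; the retry budget and delay are illustrative choices, not values from this patch, and it would additionally need <linux/delay.h>.

/* Sketch only: wait for the XPCS sequencer to report the stable state,
 * giving up after a bounded number of polls instead of spinning forever.
 */
static int sxgbe_xpcs_wait_stable_sketch(struct net_device *ndev)
{
	int tries = 100;	/* illustrative budget */
	u32 value;

	do {
		value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
		if ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE)
			return 0;
		udelay(10);	/* illustrative delay */
	} while (--tries);

	return -ETIMEDOUT;
}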
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
new file mode 100644
index 000000000000..6b26a50724d3
--- /dev/null
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
@@ -0,0 +1,38 @@
1/* 10G controller driver for Samsung SoCs
2 *
3 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Author: Byungho An <bh74.an@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef __SXGBE_XPCS_H__
13#define __SXGBE_XPCS_H__
14
15/* XPCS Registers */
16#define XPCS_OFFSET 0x1A060000
17#define SR_PCS_MMD_CONTROL1 0x030000
18#define SR_PCS_CONTROL2 0x030007
19#define VR_PCS_MMD_XAUI_MODE_CONTROL 0x038004
20#define VR_PCS_MMD_DIGITAL_STATUS 0x038010
21#define SR_MII_MMD_CONTROL 0x1F0000
22#define SR_MII_MMD_AN_ADV 0x1F0004
23#define SR_MII_MMD_AN_LINK_PARTNER_BA 0x1F0005
24#define VR_MII_MMD_AN_CONTROL 0x1F8001
25#define VR_MII_MMD_AN_INT_STATUS 0x1F8002
26
27#define XPCS_QSEQ_STATE_STABLE 0x10
28#define XPCS_QSEQ_STATE_MPLLOFF 0x1c
29#define XPCS_TYPE_SEL_R 0x00
30#define XPCS_TYPE_SEL_X 0x01
31#define XPCS_TYPE_SEL_W 0x02
32#define XPCS_XAUI_MODE 0x00
33#define XPCS_RXAUI_MODE 0x01
34
35int sxgbe_xpcs_init(struct net_device *ndev);
36int sxgbe_xpcs_init_1G(struct net_device *ndev);
37
38#endif /* __SXGBE_XPCS_H__ */
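One reading of the offsets above (an inference, not something this header states): they appear to follow the clause 45 convention of placing the MMD device address in the upper half-word and the register address in the lower, so 0x030000 addresses register 0 of MMD 3 (the PCS) and 0x1F0000 register 0 of MMD 31 (the vendor MII block). A tiny sketch of that decomposition, with hypothetical macro names:

/* Sketch only: decompose an XPCS offset under the assumed
 * (MMD << 16) | reg encoding.
 */
#define SKETCH_XPCS_MMD(off)	(((off) >> 16) & 0x1f)
#define SKETCH_XPCS_REG(off)	((off) & 0xffff)

/* SKETCH_XPCS_MMD(SR_PCS_MMD_CONTROL1) == 0x03, the PCS MMD;
 * SKETCH_XPCS_REG(SR_PCS_MMD_CONTROL1) == 0x0000, CONTROL1.
 */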