aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2016-04-02 20:23:10 -0400
committerDavid S. Miller <davem@davemloft.net>2016-04-02 20:23:10 -0400
commit833716e0ed026c1abc7dd0e85a6932b855a8e56b (patch)
tree811ae318c15164b37b1facdef2b91b74e6ee9983
parent5ada37b53ea2b310df143b2c7d6c48fbf14d5cb8 (diff)
parent91979b9db86340d7cd49392a498663fb1ac74639 (diff)
Merge branch 'stmmac-GMAC4.x'
Alexandre TORGUE says: ==================== Enhance stmmac driver to support GMAC4.x IP This is a subset of patch to enhance current stmmac driver to support new GMAC4.x chips. New set of callbacks is defined to support this new family: descriptors, dma, core. One of main changes of GMAC 4.xx IP is descriptors management. -descriptors are only used in ring mode. -A descriptor is composed of 4 32bits registers (no more extended descriptors) -descriptor mechanism (Tx for example, but it is exactly the same for RX): -useful registers: -DMA_CH#_TxDesc_Ring_Len: length of transmit descriptor ring -DMA_CH#_TxDesc_List_Address: start address of the ring -DMA_CH#_TxDesc_Tail_Pointer: address of the last descriptor to send + 1. -DMA_CH#_TxDesc_Current_App_TxDesc: address of the current descriptor -The descriptor Tail Pointer register contains the pointer to the descriptor address (N). The base address and the current descriptor decide the address of the current descriptor that the DMA can process. The descriptors up to one location less than the one indicated by the descriptor tail pointer (N-1) are owned by the DMA. The DMA continues to process the descriptors until the following condition occurs: "current descriptor pointer == Descriptor Tail pointer" Then the DMA goes into suspend mode. The application must perform a write to descriptor tail pointer register and update the tail pointer to have the following condition and to start a new transfer: "current descriptor pointer < Descriptor tail pointer" The DMA automatically wraps around the base address when the end of ring is reached. 
New features are available on IP: -TSO (TCP Segmentation Offload) for TX only -Split header: to have header and payload in 2 different buffers (not yet implemented) Below some throughput figures obtained on some boxes: iperf (mbps) -------------------------------------- tcp udp tx rx tx rx ----------------- GMAC4.x 935 930 750 800 Note: There is a change in 4.10a databook on bitfield mapping of DMA_CHANx_INTR_ENA register. This requires to have a different set of callbacks between IP 4.00a and 4.10a. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--Documentation/devicetree/bindings/net/stmmac.txt2
-rw-r--r--Documentation/networking/stmmac.txt44
-rw-r--r--MAINTAINERS1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h255
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c407
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c396
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h129
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c354
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h202
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c225
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c349
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c643
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c7
-rw-r--r--include/linux/stmmac.h2
24 files changed, 2821 insertions, 369 deletions
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 6605d19601c2..4d302db657c0 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -59,6 +59,8 @@ Optional properties:
59 - snps,fb: fixed-burst 59 - snps,fb: fixed-burst
60 - snps,mb: mixed-burst 60 - snps,mb: mixed-burst
61 - snps,rb: rebuild INCRx Burst 61 - snps,rb: rebuild INCRx Burst
62 - snps,tso: this enables the TSO feature otherwise it will be managed by
63 MAC HW capability register.
62- mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus. 64- mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
63 65
64Examples: 66Examples:
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index d64a14714236..671fe3dd56d3 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -1,6 +1,6 @@
1 STMicroelectronics 10/100/1000 Synopsys Ethernet driver 1 STMicroelectronics 10/100/1000 Synopsys Ethernet driver
2 2
3Copyright (C) 2007-2014 STMicroelectronics Ltd 3Copyright (C) 2007-2015 STMicroelectronics Ltd
4Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 4Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
5 5
6This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers 6This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
@@ -138,6 +138,8 @@ struct plat_stmmacenet_data {
138 int (*init)(struct platform_device *pdev, void *priv); 138 int (*init)(struct platform_device *pdev, void *priv);
139 void (*exit)(struct platform_device *pdev, void *priv); 139 void (*exit)(struct platform_device *pdev, void *priv);
140 void *bsp_priv; 140 void *bsp_priv;
141 int has_gmac4;
142 bool tso_en;
141}; 143};
142 144
143Where: 145Where:
@@ -181,6 +183,8 @@ Where:
181 registers. init/exit callbacks should not use or modify 183 registers. init/exit callbacks should not use or modify
182 platform data. 184 platform data.
183 o bsp_priv: another private pointer. 185 o bsp_priv: another private pointer.
186 o has_gmac4: uses GMAC4 core.
187 o tso_en: Enables TSO (TCP Segmentation Offload) feature.
184 188
185For MDIO bus The we have: 189For MDIO bus The we have:
186 190
@@ -278,6 +282,13 @@ Please see the following document:
278 o stmmac_ethtool.c: to implement the ethtool support; 282 o stmmac_ethtool.c: to implement the ethtool support;
279 o stmmac.h: private driver structure; 283 o stmmac.h: private driver structure;
280 o common.h: common definitions and VFTs; 284 o common.h: common definitions and VFTs;
285 o mmc_core.c/mmc.h: Management MAC Counters;
286 o stmmac_hwtstamp.c: HW timestamp support for PTP;
287 o stmmac_ptp.c: PTP 1588 clock;
288 o dwmac-<XXX>.c: these are for the platform glue-logic file; e.g. dwmac-sti.c
289 for STMicroelectronics SoCs.
290
291- GMAC 3.x
281 o descs.h: descriptor structure definitions; 292 o descs.h: descriptor structure definitions;
282 o dwmac1000_core.c: dwmac GiGa core functions; 293 o dwmac1000_core.c: dwmac GiGa core functions;
283 o dwmac1000_dma.c: dma functions for the GMAC chip; 294 o dwmac1000_dma.c: dma functions for the GMAC chip;
@@ -289,11 +300,32 @@ Please see the following document:
289 o enh_desc.c: functions for handling enhanced descriptors; 300 o enh_desc.c: functions for handling enhanced descriptors;
290 o norm_desc.c: functions for handling normal descriptors; 301 o norm_desc.c: functions for handling normal descriptors;
291 o chain_mode.c/ring_mode.c:: functions to manage RING/CHAINED modes; 302 o chain_mode.c/ring_mode.c:: functions to manage RING/CHAINED modes;
292 o mmc_core.c/mmc.h: Management MAC Counters; 303
293 o stmmac_hwtstamp.c: HW timestamp support for PTP; 304- GMAC4.x generation
294 o stmmac_ptp.c: PTP 1588 clock; 305 o dwmac4_core.c: dwmac GMAC4.x core functions;
295 o dwmac-<XXX>.c: these are for the platform glue-logic file; e.g. dwmac-sti.c 306 o dwmac4_desc.c: functions for handling GMAC4.x descriptors;
296 for STMicroelectronics SoCs. 307 o dwmac4_descs.h: descriptor definitions;
308 o dwmac4_dma.c: dma functions for the GMAC4.x chip;
309 o dwmac4_dma.h: dma definitions for the GMAC4.x chip;
310 o dwmac4.h: core definitions for the GMAC4.x chip;
311 o dwmac4_lib.c: generic GMAC4.x functions;
312
3134.12) TSO support (GMAC4.x)
314
315TSO (Tcp Segmentation Offload) feature is supported by GMAC 4.x chip family.
316When a packet is sent through TCP protocol, the TCP stack ensures that
317the SKB provided to the low level driver (stmmac in our case) matches with
318the maximum frame len (IP header + TCP header + payload <= 1500 bytes (for
319MTU set to 1500)). It means that if an application using TCP want to send a
320packet which will have a length (after adding headers) > 1514 the packet
321will be split in several TCP packets: The data payload is split and headers
322(TCP/IP ..) are added. It is done by software.
323
324When TSO is enabled, the TCP stack doesn't care about the maximum frame
325length and provide SKB packet to stmmac as it is. The GMAC IP will have to
326perform the segmentation by it self to match with maximum frame length.
327
328This feature can be enabled in device tree through "snps,tso" entry.
297 329
2985) Debug Information 3305) Debug Information
299 331
diff --git a/MAINTAINERS b/MAINTAINERS
index 7ba7bc485d74..67d99dd0e2e5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3348,6 +3348,7 @@ F: Documentation/powerpc/cxlflash.txt
3348 3348
3349STMMAC ETHERNET DRIVER 3349STMMAC ETHERNET DRIVER
3350M: Giuseppe Cavallaro <peppe.cavallaro@st.com> 3350M: Giuseppe Cavallaro <peppe.cavallaro@st.com>
3351M: Alexandre Torgue <alexandre.torgue@st.com>
3351L: netdev@vger.kernel.org 3352L: netdev@vger.kernel.org
3352W: http://www.stlinux.com 3353W: http://www.stlinux.com
3353S: Supported 3354S: Supported
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index b3901616f4f6..0fb362d5a722 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,7 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ 2stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
3 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ 3 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
4 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ 4 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
5 mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y) 5 mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
6 dwmac4_dma.o dwmac4_lib.o dwmac4_core.o $(stmmac-y)
6 7
7# Ordering matters. Generic driver must be last. 8# Ordering matters. Generic driver must be last.
8obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o 9obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index f96d257308b0..fc60368df2e7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -41,6 +41,8 @@
41/* Synopsys Core versions */ 41/* Synopsys Core versions */
42#define DWMAC_CORE_3_40 0x34 42#define DWMAC_CORE_3_40 0x34
43#define DWMAC_CORE_3_50 0x35 43#define DWMAC_CORE_3_50 0x35
44#define DWMAC_CORE_4_00 0x40
45#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
44 46
45#define DMA_TX_SIZE 512 47#define DMA_TX_SIZE 512
46#define DMA_RX_SIZE 512 48#define DMA_RX_SIZE 512
@@ -167,6 +169,9 @@ struct stmmac_extra_stats {
167 unsigned long mtl_rx_fifo_ctrl_active; 169 unsigned long mtl_rx_fifo_ctrl_active;
168 unsigned long mac_rx_frame_ctrl_fifo; 170 unsigned long mac_rx_frame_ctrl_fifo;
169 unsigned long mac_gmii_rx_proto_engine; 171 unsigned long mac_gmii_rx_proto_engine;
172 /* TSO */
173 unsigned long tx_tso_frames;
174 unsigned long tx_tso_nfrags;
170}; 175};
171 176
172/* CSR Frequency Access Defines*/ 177/* CSR Frequency Access Defines*/
@@ -243,6 +248,7 @@ enum rx_frame_status {
243 csum_none = 0x2, 248 csum_none = 0x2,
244 llc_snap = 0x4, 249 llc_snap = 0x4,
245 dma_own = 0x8, 250 dma_own = 0x8,
251 rx_not_ls = 0x10,
246}; 252};
247 253
248/* Tx status */ 254/* Tx status */
@@ -269,6 +275,7 @@ enum dma_irq_status {
269#define CORE_PCS_ANE_COMPLETE (1 << 5) 275#define CORE_PCS_ANE_COMPLETE (1 << 5)
270#define CORE_PCS_LINK_STATUS (1 << 6) 276#define CORE_PCS_LINK_STATUS (1 << 6)
271#define CORE_RGMII_IRQ (1 << 7) 277#define CORE_RGMII_IRQ (1 << 7)
278#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
272 279
273/* Physical Coding Sublayer */ 280/* Physical Coding Sublayer */
274struct rgmii_adv { 281struct rgmii_adv {
@@ -300,8 +307,10 @@ struct dma_features {
300 /* 802.3az - Energy-Efficient Ethernet (EEE) */ 307 /* 802.3az - Energy-Efficient Ethernet (EEE) */
301 unsigned int eee; 308 unsigned int eee;
302 unsigned int av; 309 unsigned int av;
310 unsigned int tsoen;
303 /* TX and RX csum */ 311 /* TX and RX csum */
304 unsigned int tx_coe; 312 unsigned int tx_coe;
313 unsigned int rx_coe;
305 unsigned int rx_coe_type1; 314 unsigned int rx_coe_type1;
306 unsigned int rx_coe_type2; 315 unsigned int rx_coe_type2;
307 unsigned int rxfifo_over_2048; 316 unsigned int rxfifo_over_2048;
@@ -348,6 +357,10 @@ struct stmmac_desc_ops {
348 void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, 357 void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
349 bool csum_flag, int mode, bool tx_own, 358 bool csum_flag, int mode, bool tx_own,
350 bool ls); 359 bool ls);
360 void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
361 int len2, bool tx_own, bool ls,
362 unsigned int tcphdrlen,
363 unsigned int tcppayloadlen);
351 /* Set/get the owner of the descriptor */ 364 /* Set/get the owner of the descriptor */
352 void (*set_tx_owner) (struct dma_desc *p); 365 void (*set_tx_owner) (struct dma_desc *p);
353 int (*get_tx_owner) (struct dma_desc *p); 366 int (*get_tx_owner) (struct dma_desc *p);
@@ -380,6 +393,10 @@ struct stmmac_desc_ops {
380 u64(*get_timestamp) (void *desc, u32 ats); 393 u64(*get_timestamp) (void *desc, u32 ats);
381 /* get rx timestamp status */ 394 /* get rx timestamp status */
382 int (*get_rx_timestamp_status) (void *desc, u32 ats); 395 int (*get_rx_timestamp_status) (void *desc, u32 ats);
396 /* Display ring */
397 void (*display_ring)(void *head, unsigned int size, bool rx);
398 /* set MSS via context descriptor */
399 void (*set_mss)(struct dma_desc *p, unsigned int mss);
383}; 400};
384 401
385extern const struct stmmac_desc_ops enh_desc_ops; 402extern const struct stmmac_desc_ops enh_desc_ops;
@@ -412,9 +429,15 @@ struct stmmac_dma_ops {
412 int (*dma_interrupt) (void __iomem *ioaddr, 429 int (*dma_interrupt) (void __iomem *ioaddr,
413 struct stmmac_extra_stats *x); 430 struct stmmac_extra_stats *x);
414 /* If supported then get the optional core features */ 431 /* If supported then get the optional core features */
415 unsigned int (*get_hw_feature) (void __iomem *ioaddr); 432 void (*get_hw_feature)(void __iomem *ioaddr,
433 struct dma_features *dma_cap);
416 /* Program the HW RX Watchdog */ 434 /* Program the HW RX Watchdog */
417 void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt); 435 void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
436 void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
437 void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
438 void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
439 void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
440 void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
418}; 441};
419 442
420struct mac_device_info; 443struct mac_device_info;
@@ -463,6 +486,7 @@ struct stmmac_hwtimestamp {
463}; 486};
464 487
465extern const struct stmmac_hwtimestamp stmmac_ptp; 488extern const struct stmmac_hwtimestamp stmmac_ptp;
489extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
466 490
467struct mac_link { 491struct mac_link {
468 int port; 492 int port;
@@ -495,7 +519,6 @@ struct mac_device_info {
495 const struct stmmac_hwtimestamp *ptp; 519 const struct stmmac_hwtimestamp *ptp;
496 struct mii_regs mii; /* MII register Addresses */ 520 struct mii_regs mii; /* MII register Addresses */
497 struct mac_link link; 521 struct mac_link link;
498 unsigned int synopsys_uid;
499 void __iomem *pcsr; /* vpointer to device CSRs */ 522 void __iomem *pcsr; /* vpointer to device CSRs */
500 int multicast_filter_bins; 523 int multicast_filter_bins;
501 int unicast_filter_entries; 524 int unicast_filter_entries;
@@ -504,18 +527,47 @@ struct mac_device_info {
504}; 527};
505 528
506struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, 529struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
507 int perfect_uc_entries); 530 int perfect_uc_entries,
508struct mac_device_info *dwmac100_setup(void __iomem *ioaddr); 531 int *synopsys_id);
532struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id);
533struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
534 int perfect_uc_entries, int *synopsys_id);
509 535
510void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], 536void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
511 unsigned int high, unsigned int low); 537 unsigned int high, unsigned int low);
512void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, 538void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
513 unsigned int high, unsigned int low); 539 unsigned int high, unsigned int low);
514
515void stmmac_set_mac(void __iomem *ioaddr, bool enable); 540void stmmac_set_mac(void __iomem *ioaddr, bool enable);
516 541
542void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
543 unsigned int high, unsigned int low);
544void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
545 unsigned int high, unsigned int low);
546void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
547
517void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); 548void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
518extern const struct stmmac_mode_ops ring_mode_ops; 549extern const struct stmmac_mode_ops ring_mode_ops;
519extern const struct stmmac_mode_ops chain_mode_ops; 550extern const struct stmmac_mode_ops chain_mode_ops;
520 551extern const struct stmmac_desc_ops dwmac4_desc_ops;
552
553/**
554 * stmmac_get_synopsys_id - return the SYINID.
555 * @priv: driver private structure
556 * Description: this simple function is to decode and return the SYINID
557 * starting from the HW core register.
558 */
559static inline u32 stmmac_get_synopsys_id(u32 hwid)
560{
561 /* Check Synopsys Id (not available on old chips) */
562 if (likely(hwid)) {
563 u32 uid = ((hwid & 0x0000ff00) >> 8);
564 u32 synid = (hwid & 0x000000ff);
565
566 pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
567 uid, synid);
568
569 return synid;
570 }
571 return 0;
572}
521#endif /* __COMMON_H__ */ 573#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index c2941172f6d1..fb1eb578e34e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -491,7 +491,8 @@ static const struct stmmac_ops dwmac1000_ops = {
491}; 491};
492 492
493struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, 493struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
494 int perfect_uc_entries) 494 int perfect_uc_entries,
495 int *synopsys_id)
495{ 496{
496 struct mac_device_info *mac; 497 struct mac_device_info *mac;
497 u32 hwid = readl(ioaddr + GMAC_VERSION); 498 u32 hwid = readl(ioaddr + GMAC_VERSION);
@@ -516,7 +517,9 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
516 mac->link.speed = GMAC_CONTROL_FES; 517 mac->link.speed = GMAC_CONTROL_FES;
517 mac->mii.addr = GMAC_MII_ADDR; 518 mac->mii.addr = GMAC_MII_ADDR;
518 mac->mii.data = GMAC_MII_DATA; 519 mac->mii.data = GMAC_MII_DATA;
519 mac->synopsys_uid = hwid; 520
521 /* Get and dump the chip ID */
522 *synopsys_id = stmmac_get_synopsys_id(hwid);
520 523
521 return mac; 524 return mac;
522} 525}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index da32d6037e3e..990746955216 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -215,9 +215,40 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
215 } 215 }
216} 216}
217 217
218static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr) 218static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
219 struct dma_features *dma_cap)
219{ 220{
220 return readl(ioaddr + DMA_HW_FEATURE); 221 u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
222
223 dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
224 dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
225 dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
226 dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
227 dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
228 dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
229 dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
230 dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
231 dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
232 /* MMC */
233 dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
234 /* IEEE 1588-2002 */
235 dma_cap->time_stamp =
236 (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
237 /* IEEE 1588-2008 */
238 dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
239 /* 802.3az - Energy-Efficient Ethernet (EEE) */
240 dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
241 dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
242 /* TX and RX csum */
243 dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
244 dma_cap->rx_coe_type1 = (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
245 dma_cap->rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
246 dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
247 /* TX and RX number of channels */
248 dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
249 dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
250 /* Alternate (enhanced) DESC mode */
251 dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
221} 252}
222 253
223static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt) 254static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index f8dd773f246c..6418b2e07619 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -173,7 +173,7 @@ static const struct stmmac_ops dwmac100_ops = {
173 .get_umac_addr = dwmac100_get_umac_addr, 173 .get_umac_addr = dwmac100_get_umac_addr,
174}; 174};
175 175
176struct mac_device_info *dwmac100_setup(void __iomem *ioaddr) 176struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
177{ 177{
178 struct mac_device_info *mac; 178 struct mac_device_info *mac;
179 179
@@ -192,7 +192,8 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
192 mac->link.speed = 0; 192 mac->link.speed = 0;
193 mac->mii.addr = MAC_MII_ADDR; 193 mac->mii.addr = MAC_MII_ADDR;
194 mac->mii.data = MAC_MII_DATA; 194 mac->mii.data = MAC_MII_DATA;
195 mac->synopsys_uid = 0; 195 /* Synopsys Id is not available on old chips */
196 *synopsys_id = 0;
196 197
197 return mac; 198 return mac;
198} 199}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
new file mode 100644
index 000000000000..bc50952a18e7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -0,0 +1,255 @@
1/*
2 * DWMAC4 Header file.
3 *
4 * Copyright (C) 2015 STMicroelectronics Ltd
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * Author: Alexandre Torgue <alexandre.torgue@st.com>
11 */
12
13#ifndef __DWMAC4_H__
14#define __DWMAC4_H__
15
16#include "common.h"
17
18/* MAC registers */
19#define GMAC_CONFIG 0x00000000
20#define GMAC_PACKET_FILTER 0x00000008
21#define GMAC_HASH_TAB_0_31 0x00000010
22#define GMAC_HASH_TAB_32_63 0x00000014
23#define GMAC_RX_FLOW_CTRL 0x00000090
24#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
25#define GMAC_INT_STATUS 0x000000b0
26#define GMAC_INT_EN 0x000000b4
27#define GMAC_AN_CTRL 0x000000e0
28#define GMAC_AN_STATUS 0x000000e4
29#define GMAC_AN_ADV 0x000000e8
30#define GMAC_AN_LPA 0x000000ec
31#define GMAC_PMT 0x000000c0
32#define GMAC_VERSION 0x00000110
33#define GMAC_DEBUG 0x00000114
34#define GMAC_HW_FEATURE0 0x0000011c
35#define GMAC_HW_FEATURE1 0x00000120
36#define GMAC_HW_FEATURE2 0x00000124
37#define GMAC_MDIO_ADDR 0x00000200
38#define GMAC_MDIO_DATA 0x00000204
39#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
40#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
41
42/* MAC Packet Filtering */
43#define GMAC_PACKET_FILTER_PR BIT(0)
44#define GMAC_PACKET_FILTER_HMC BIT(2)
45#define GMAC_PACKET_FILTER_PM BIT(4)
46
47#define GMAC_MAX_PERFECT_ADDRESSES 128
48
49/* MAC Flow Control RX */
50#define GMAC_RX_FLOW_CTRL_RFE BIT(0)
51
52/* MAC Flow Control TX */
53#define GMAC_TX_FLOW_CTRL_TFE BIT(1)
54#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
55
56/* MAC Interrupt bitmap*/
57#define GMAC_INT_PMT_EN BIT(4)
58#define GMAC_INT_LPI_EN BIT(5)
59
60enum dwmac4_irq_status {
61 time_stamp_irq = 0x00001000,
62 mmc_rx_csum_offload_irq = 0x00000800,
63 mmc_tx_irq = 0x00000400,
64 mmc_rx_irq = 0x00000200,
65 mmc_irq = 0x00000100,
66 pmt_irq = 0x00000010,
67 pcs_ane_irq = 0x00000004,
68 pcs_link_irq = 0x00000002,
69};
70
71/* MAC Auto-Neg bitmap*/
72#define GMAC_AN_CTRL_RAN BIT(9)
73#define GMAC_AN_CTRL_ANE BIT(12)
74#define GMAC_AN_CTRL_ELE BIT(14)
75#define GMAC_AN_FD BIT(5)
76#define GMAC_AN_HD BIT(6)
77#define GMAC_AN_PSE_MASK GENMASK(8, 7)
78#define GMAC_AN_PSE_SHIFT 7
79
80/* MAC PMT bitmap */
81enum power_event {
82 pointer_reset = 0x80000000,
83 global_unicast = 0x00000200,
84 wake_up_rx_frame = 0x00000040,
85 magic_frame = 0x00000020,
86 wake_up_frame_en = 0x00000004,
87 magic_pkt_en = 0x00000002,
88 power_down = 0x00000001,
89};
90
91/* MAC Debug bitmap */
92#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
93#define GMAC_DEBUG_TFCSTS_SHIFT 17
94#define GMAC_DEBUG_TFCSTS_IDLE 0
95#define GMAC_DEBUG_TFCSTS_WAIT 1
96#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2
97#define GMAC_DEBUG_TFCSTS_XFER 3
98#define GMAC_DEBUG_TPESTS BIT(16)
99#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1)
100#define GMAC_DEBUG_RFCFCSTS_SHIFT 1
101#define GMAC_DEBUG_RPESTS BIT(0)
102
103/* MAC config */
104#define GMAC_CONFIG_IPC BIT(27)
105#define GMAC_CONFIG_2K BIT(22)
106#define GMAC_CONFIG_ACS BIT(20)
107#define GMAC_CONFIG_BE BIT(18)
108#define GMAC_CONFIG_JD BIT(17)
109#define GMAC_CONFIG_JE BIT(16)
110#define GMAC_CONFIG_PS BIT(15)
111#define GMAC_CONFIG_FES BIT(14)
112#define GMAC_CONFIG_DM BIT(13)
113#define GMAC_CONFIG_DCRS BIT(9)
114#define GMAC_CONFIG_TE BIT(1)
115#define GMAC_CONFIG_RE BIT(0)
116
117/* MAC HW features0 bitmap */
118#define GMAC_HW_FEAT_ADDMAC BIT(18)
119#define GMAC_HW_FEAT_RXCOESEL BIT(16)
120#define GMAC_HW_FEAT_TXCOSEL BIT(14)
121#define GMAC_HW_FEAT_EEESEL BIT(13)
122#define GMAC_HW_FEAT_TSSEL BIT(12)
123#define GMAC_HW_FEAT_MMCSEL BIT(8)
124#define GMAC_HW_FEAT_MGKSEL BIT(7)
125#define GMAC_HW_FEAT_RWKSEL BIT(6)
126#define GMAC_HW_FEAT_SMASEL BIT(5)
127#define GMAC_HW_FEAT_VLHASH BIT(4)
128#define GMAC_HW_FEAT_PCSSEL BIT(3)
129#define GMAC_HW_FEAT_HDSEL BIT(2)
130#define GMAC_HW_FEAT_GMIISEL BIT(1)
131#define GMAC_HW_FEAT_MIISEL BIT(0)
132
133/* MAC HW features1 bitmap */
134#define GMAC_HW_FEAT_AVSEL BIT(20)
135#define GMAC_HW_TSOEN BIT(18)
136
137/* MAC HW features2 bitmap */
138#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
139#define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12)
140
141/* MAC HW ADDR regs */
142#define GMAC_HI_DCS GENMASK(18, 16)
143#define GMAC_HI_DCS_SHIFT 16
144#define GMAC_HI_REG_AE BIT(31)
145
146/* MTL registers */
147#define MTL_INT_STATUS 0x00000c20
148#define MTL_INT_Q0 BIT(0)
149
150#define MTL_CHAN_BASE_ADDR 0x00000d00
151#define MTL_CHAN_BASE_OFFSET 0x40
152#define MTL_CHANX_BASE_ADDR(x) (MTL_CHAN_BASE_ADDR + \
153 (x * MTL_CHAN_BASE_OFFSET))
154
155#define MTL_CHAN_TX_OP_MODE(x) MTL_CHANX_BASE_ADDR(x)
156#define MTL_CHAN_TX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x8)
157#define MTL_CHAN_INT_CTRL(x) (MTL_CHANX_BASE_ADDR(x) + 0x2c)
158#define MTL_CHAN_RX_OP_MODE(x) (MTL_CHANX_BASE_ADDR(x) + 0x30)
159#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38)
160
161#define MTL_OP_MODE_RSF BIT(5)
162#define MTL_OP_MODE_TSF BIT(1)
163
164#define MTL_OP_MODE_TTC_MASK 0x70
165#define MTL_OP_MODE_TTC_SHIFT 4
166
167#define MTL_OP_MODE_TTC_32 0
168#define MTL_OP_MODE_TTC_64 (1 << MTL_OP_MODE_TTC_SHIFT)
169#define MTL_OP_MODE_TTC_96 (2 << MTL_OP_MODE_TTC_SHIFT)
170#define MTL_OP_MODE_TTC_128 (3 << MTL_OP_MODE_TTC_SHIFT)
171#define MTL_OP_MODE_TTC_192 (4 << MTL_OP_MODE_TTC_SHIFT)
172#define MTL_OP_MODE_TTC_256 (5 << MTL_OP_MODE_TTC_SHIFT)
173#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
174#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
175
176#define MTL_OP_MODE_RTC_MASK 0x18
177#define MTL_OP_MODE_RTC_SHIFT 3
178
179#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT)
180#define MTL_OP_MODE_RTC_64 0
181#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
182#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
183
184/* MTL debug */
185#define MTL_DEBUG_TXSTSFSTS BIT(5)
186#define MTL_DEBUG_TXFSTS BIT(4)
187#define MTL_DEBUG_TWCSTS BIT(3)
188
189/* MTL debug: Tx FIFO Read Controller Status */
190#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1)
191#define MTL_DEBUG_TRCSTS_SHIFT 1
192#define MTL_DEBUG_TRCSTS_IDLE 0
193#define MTL_DEBUG_TRCSTS_READ 1
194#define MTL_DEBUG_TRCSTS_TXW 2
195#define MTL_DEBUG_TRCSTS_WRITE 3
196#define MTL_DEBUG_TXPAUSED BIT(0)
197
198/* MAC debug: GMII or MII Transmit Protocol Engine Status */
199#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
200#define MTL_DEBUG_RXFSTS_SHIFT 4
201#define MTL_DEBUG_RXFSTS_EMPTY 0
202#define MTL_DEBUG_RXFSTS_BT 1
203#define MTL_DEBUG_RXFSTS_AT 2
204#define MTL_DEBUG_RXFSTS_FULL 3
205#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
206#define MTL_DEBUG_RRCSTS_SHIFT 1
207#define MTL_DEBUG_RRCSTS_IDLE 0
208#define MTL_DEBUG_RRCSTS_RDATA 1
209#define MTL_DEBUG_RRCSTS_RSTAT 2
210#define MTL_DEBUG_RRCSTS_FLUSH 3
211#define MTL_DEBUG_RWCSTS BIT(0)
212
213/* MTL interrupt */
214#define MTL_RX_OVERFLOW_INT_EN BIT(24)
215#define MTL_RX_OVERFLOW_INT BIT(16)
216
217/* Default operating mode of the MAC */
218#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \
219 GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
220
221/* To dump the core regs excluding the Address Registers */
222#define GMAC_REG_NUM 132
223
224/* MTL debug */
225#define MTL_DEBUG_TXSTSFSTS BIT(5)
226#define MTL_DEBUG_TXFSTS BIT(4)
227#define MTL_DEBUG_TWCSTS BIT(3)
228
229/* MTL debug: Tx FIFO Read Controller Status */
230#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1)
231#define MTL_DEBUG_TRCSTS_SHIFT 1
232#define MTL_DEBUG_TRCSTS_IDLE 0
233#define MTL_DEBUG_TRCSTS_READ 1
234#define MTL_DEBUG_TRCSTS_TXW 2
235#define MTL_DEBUG_TRCSTS_WRITE 3
236#define MTL_DEBUG_TXPAUSED BIT(0)
237
238/* MAC debug: GMII or MII Transmit Protocol Engine Status */
239#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
240#define MTL_DEBUG_RXFSTS_SHIFT 4
241#define MTL_DEBUG_RXFSTS_EMPTY 0
242#define MTL_DEBUG_RXFSTS_BT 1
243#define MTL_DEBUG_RXFSTS_AT 2
244#define MTL_DEBUG_RXFSTS_FULL 3
245#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
246#define MTL_DEBUG_RRCSTS_SHIFT 1
247#define MTL_DEBUG_RRCSTS_IDLE 0
248#define MTL_DEBUG_RRCSTS_RDATA 1
249#define MTL_DEBUG_RRCSTS_RSTAT 2
250#define MTL_DEBUG_RRCSTS_FLUSH 3
251#define MTL_DEBUG_RWCSTS BIT(0)
252
253extern const struct stmmac_dma_ops dwmac4_dma_ops;
254extern const struct stmmac_dma_ops dwmac410_dma_ops;
255#endif /* __DWMAC4_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
new file mode 100644
index 000000000000..4f7283d05588
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -0,0 +1,407 @@
1/*
2 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
3 * DWC Ether MAC version 4.00 has been used for developing this code.
4 *
5 * This only implements the mac core functions for this chip.
6 *
7 * Copyright (C) 2015 STMicroelectronics Ltd
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * Author: Alexandre Torgue <alexandre.torgue@st.com>
14 */
15
16#include <linux/crc32.h>
17#include <linux/slab.h>
18#include <linux/ethtool.h>
19#include <linux/io.h>
20#include "dwmac4.h"
21
/* Program the MAC core defaults, frame-size handling for the given MTU,
 * and the interrupt enable register.
 */
static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value |= GMAC_CORE_INIT;

	/* Above the standard 1500-byte MTU enable 2K packet support;
	 * above 2000 bytes jumbo frames must also be enabled.
	 */
	if (mtu > 1500)
		value |= GMAC_CONFIG_2K;
	if (mtu > 2000)
		value |= GMAC_CONFIG_JE;

	writel(value, ioaddr + GMAC_CONFIG);

	/* Enable only the PMT interrupt; every other GMAC irq stays masked */
	writel(GMAC_INT_PMT_EN, ioaddr + GMAC_INT_EN);
}
39
/* Dump the first GMAC_REG_NUM core registers (debug aid; pr_debug only) */
static void dwmac4_dump_regs(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	pr_debug("\tDWMAC4 regs (base addr = 0x%p)\n", ioaddr);

	for (i = 0; i < GMAC_REG_NUM; i++) {
		int offset = i * 4;	/* registers are 32-bit, word-aligned */

		pr_debug("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
			 offset, readl(ioaddr + offset));
	}
}
54
/* Enable/disable RX checksum offload according to hw->rx_csum.
 * Returns non-zero if the IPC bit actually stuck (read back from the
 * register), 0 otherwise — the feature may be absent in some configs.
 */
static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (hw->rx_csum)
		value |= GMAC_CONFIG_IPC;
	else
		value &= ~GMAC_CONFIG_IPC;

	writel(value, ioaddr + GMAC_CONFIG);

	/* Read back: confirms whether the hardware accepted the bit */
	value = readl(ioaddr + GMAC_CONFIG);

	return !!(value & GMAC_CONFIG_IPC);
}
71
/* Configure Wake-on-LAN: translate WAKE_* flags into the PMT register.
 * A zero @mode clears all wake-up sources.
 */
static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		/* power_down must be set together with the wake source */
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= global_unicast;
	}

	writel(pmt, ioaddr + GMAC_PMT);
}
88
/* Write a MAC address into perfect-filter entry @reg_n (high/low regs) */
static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}
97
/* Read the MAC address stored in perfect-filter entry @reg_n into @addr */
static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}
106
107static void dwmac4_set_filter(struct mac_device_info *hw,
108 struct net_device *dev)
109{
110 void __iomem *ioaddr = (void __iomem *)dev->base_addr;
111 unsigned int value = 0;
112
113 if (dev->flags & IFF_PROMISC) {
114 value = GMAC_PACKET_FILTER_PR;
115 } else if ((dev->flags & IFF_ALLMULTI) ||
116 (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
117 /* Pass all multi */
118 value = GMAC_PACKET_FILTER_PM;
119 /* Set the 64 bits of the HASH tab. To be updated if taller
120 * hash table is used
121 */
122 writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31);
123 writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63);
124 } else if (!netdev_mc_empty(dev)) {
125 u32 mc_filter[2];
126 struct netdev_hw_addr *ha;
127
128 /* Hash filter for multicast */
129 value = GMAC_PACKET_FILTER_HMC;
130
131 memset(mc_filter, 0, sizeof(mc_filter));
132 netdev_for_each_mc_addr(ha, dev) {
133 /* The upper 6 bits of the calculated CRC are used to
134 * index the content of the Hash Table Reg 0 and 1.
135 */
136 int bit_nr =
137 (bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26);
138 /* The most significant bit determines the register
139 * to use while the other 5 bits determines the bit
140 * within the selected register
141 */
142 mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F));
143 }
144 writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31);
145 writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
146 }
147
148 /* Handle multiple unicast addresses */
149 if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
150 /* Switch to promiscuous mode if more than 128 addrs
151 * are required
152 */
153 value |= GMAC_PACKET_FILTER_PR;
154 } else if (!netdev_uc_empty(dev)) {
155 int reg = 1;
156 struct netdev_hw_addr *ha;
157
158 netdev_for_each_uc_addr(ha, dev) {
159 dwmac4_set_umac_addr(ioaddr, ha->addr, reg);
160 reg++;
161 }
162 }
163
164 writel(value, ioaddr + GMAC_PACKET_FILTER);
165}
166
167static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
168 unsigned int fc, unsigned int pause_time)
169{
170 void __iomem *ioaddr = hw->pcsr;
171 u32 channel = STMMAC_CHAN0; /* FIXME */
172 unsigned int flow = 0;
173
174 pr_debug("GMAC Flow-Control:\n");
175 if (fc & FLOW_RX) {
176 pr_debug("\tReceive Flow-Control ON\n");
177 flow |= GMAC_RX_FLOW_CTRL_RFE;
178 writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
179 }
180 if (fc & FLOW_TX) {
181 pr_debug("\tTransmit Flow-Control ON\n");
182 flow |= GMAC_TX_FLOW_CTRL_TFE;
183 writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
184
185 if (duplex) {
186 pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
187 flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
188 writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
189 }
190 }
191}
192
/* Enable PCS auto-negotiation (optionally restarting it) */
static void dwmac4_ctrl_ane(struct mac_device_info *hw, bool restart)
{
	void __iomem *ioaddr = hw->pcsr;

	/* auto negotiation enable and External Loopback enable */
	u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;

	if (restart)
		value |= GMAC_AN_CTRL_RAN;

	writel(value, ioaddr + GMAC_AN_CTRL);
}
205
/* Read the locally advertised (GMAC_AN_ADV) and link-partner
 * (GMAC_AN_LPA) duplex/pause capabilities into @adv.
 *
 * NOTE(review): the local duplex uses |= for half duplex while the
 * link-partner duplex uses plain assignment — presumably mirroring the
 * other dwmac cores; confirm this asymmetry is intended.
 */
static void dwmac4_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_AN_ADV);

	if (value & GMAC_AN_FD)
		adv->duplex = DUPLEX_FULL;
	if (value & GMAC_AN_HD)
		adv->duplex |= DUPLEX_HALF;

	adv->pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;

	value = readl(ioaddr + GMAC_AN_LPA);

	if (value & GMAC_AN_FD)
		adv->lp_duplex = DUPLEX_FULL;
	if (value & GMAC_AN_HD)
		adv->lp_duplex = DUPLEX_HALF;

	adv->lp_pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
}
227
/* Read and acknowledge the GMAC and MTL interrupt sources.
 * Updates stat counters in @x as a side effect. Returns 0, or a CORE_IRQ_*
 * code (currently only CORE_IRQ_MTL_RX_OVERFLOW) the caller must handle.
 */
static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	u32 intr_status;
	int ret = 0;

	intr_status = readl(ioaddr + GMAC_INT_STATUS);

	/* Not used events (e.g. MMC interrupts) are not handled. */
	if ((intr_status & mmc_tx_irq))
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;
	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* Reading GMAC_AN_STATUS acks the PCS AN/link interrupts */
	if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
		readl(ioaddr + GMAC_AN_STATUS);
		x->irq_pcs_ane_n++;
	}

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
	/* Check MTL Interrupt: Currently only one queue is used: Q0. */
	if (mtl_int_qx_status & MTL_INT_Q0) {
		/* read Queue 0 Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));

		if (status & MTL_RX_OVERFLOW_INT) {
			/* clear Interrupt (write-1-to-clear status bit) */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}
272
273static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
274{
275 u32 value;
276
277 /* Currently only channel 0 is supported */
278 value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
279
280 if (value & MTL_DEBUG_TXSTSFSTS)
281 x->mtl_tx_status_fifo_full++;
282 if (value & MTL_DEBUG_TXFSTS)
283 x->mtl_tx_fifo_not_empty++;
284 if (value & MTL_DEBUG_TWCSTS)
285 x->mmtl_fifo_ctrl++;
286 if (value & MTL_DEBUG_TRCSTS_MASK) {
287 u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
288 >> MTL_DEBUG_TRCSTS_SHIFT;
289 if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
290 x->mtl_tx_fifo_read_ctrl_write++;
291 else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
292 x->mtl_tx_fifo_read_ctrl_wait++;
293 else if (trcsts == MTL_DEBUG_TRCSTS_READ)
294 x->mtl_tx_fifo_read_ctrl_read++;
295 else
296 x->mtl_tx_fifo_read_ctrl_idle++;
297 }
298 if (value & MTL_DEBUG_TXPAUSED)
299 x->mac_tx_in_pause++;
300
301 value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
302
303 if (value & MTL_DEBUG_RXFSTS_MASK) {
304 u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
305 >> MTL_DEBUG_RRCSTS_SHIFT;
306
307 if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
308 x->mtl_rx_fifo_fill_level_full++;
309 else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
310 x->mtl_rx_fifo_fill_above_thresh++;
311 else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
312 x->mtl_rx_fifo_fill_below_thresh++;
313 else
314 x->mtl_rx_fifo_fill_level_empty++;
315 }
316 if (value & MTL_DEBUG_RRCSTS_MASK) {
317 u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
318 MTL_DEBUG_RRCSTS_SHIFT;
319
320 if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
321 x->mtl_rx_fifo_read_ctrl_flush++;
322 else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
323 x->mtl_rx_fifo_read_ctrl_read_data++;
324 else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
325 x->mtl_rx_fifo_read_ctrl_status++;
326 else
327 x->mtl_rx_fifo_read_ctrl_idle++;
328 }
329 if (value & MTL_DEBUG_RWCSTS)
330 x->mtl_rx_fifo_ctrl_active++;
331
332 /* GMAC debug */
333 value = readl(ioaddr + GMAC_DEBUG);
334
335 if (value & GMAC_DEBUG_TFCSTS_MASK) {
336 u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
337 >> GMAC_DEBUG_TFCSTS_SHIFT;
338
339 if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
340 x->mac_tx_frame_ctrl_xfer++;
341 else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
342 x->mac_tx_frame_ctrl_pause++;
343 else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
344 x->mac_tx_frame_ctrl_wait++;
345 else
346 x->mac_tx_frame_ctrl_idle++;
347 }
348 if (value & GMAC_DEBUG_TPESTS)
349 x->mac_gmii_tx_proto_engine++;
350 if (value & GMAC_DEBUG_RFCFCSTS_MASK)
351 x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
352 >> GMAC_DEBUG_RFCFCSTS_SHIFT;
353 if (value & GMAC_DEBUG_RPESTS)
354 x->mac_gmii_rx_proto_engine++;
355}
356
/* MAC core callbacks for DWC Ether MAC 4.xx */
static const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.ctrl_ane = dwmac4_ctrl_ane,
	.get_adv = dwmac4_get_adv,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
};
371
372struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
373 int perfect_uc_entries, int *synopsys_id)
374{
375 struct mac_device_info *mac;
376 u32 hwid = readl(ioaddr + GMAC_VERSION);
377
378 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
379 if (!mac)
380 return NULL;
381
382 mac->pcsr = ioaddr;
383 mac->multicast_filter_bins = mcbins;
384 mac->unicast_filter_entries = perfect_uc_entries;
385 mac->mcast_bits_log2 = 0;
386
387 if (mac->multicast_filter_bins)
388 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
389
390 mac->mac = &dwmac4_ops;
391
392 mac->link.port = GMAC_CONFIG_PS;
393 mac->link.duplex = GMAC_CONFIG_DM;
394 mac->link.speed = GMAC_CONFIG_FES;
395 mac->mii.addr = GMAC_MDIO_ADDR;
396 mac->mii.data = GMAC_MDIO_DATA;
397
398 /* Get and dump the chip ID */
399 *synopsys_id = stmmac_get_synopsys_id(hwid);
400
401 if (*synopsys_id > DWMAC_CORE_4_00)
402 mac->dma = &dwmac410_dma_ops;
403 else
404 mac->dma = &dwmac4_dma_ops;
405
406 return mac;
407}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
new file mode 100644
index 000000000000..d4952c7a836d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -0,0 +1,396 @@
1/*
2 * This contains the functions to handle the descriptors for DesignWare databook
3 * 4.xx.
4 *
5 * Copyright (C) 2015 STMicroelectronics Ltd
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * Author: Alexandre Torgue <alexandre.torgue@st.com>
12 */
13
14#include <linux/stmmac.h>
15#include "common.h"
16#include "dwmac4_descs.h"
17
/* Decode a TX write-back descriptor.
 * Returns tx_dma_own while the DMA still owns it, tx_not_ls for a
 * non-last segment, tx_err on an error summary, tx_done otherwise.
 * Error counters in @x / @data are updated as a side effect.
 */
static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
				       struct dma_desc *p,
				       void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int tdes3;
	int ret = tx_done;

	tdes3 = p->des3;

	/* Get tx owner first */
	if (unlikely(tdes3 & TDES3_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
		return tx_not_ls;

	if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
		if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
			x->tx_jabber++;
		if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
			x->tx_frame_flushed++;
		if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		/* The collision count field is only valid on a collision */
		if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
			     (tdes3 & TDES3_EXCESSIVE_COLLISION)))
			stats->collisions +=
				(tdes3 & TDES3_COLLISION_COUNT_MASK)
				>> TDES3_COLLISION_COUNT_SHIFT;

		if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;

		if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR))
			x->tx_underflow++;

		if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
			x->tx_ip_header_error++;

		if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
			x->tx_payload_error++;

		ret = tx_err;
	}

	if (unlikely(tdes3 & TDES3_DEFERRED))
		x->tx_deferred++;

	return ret;
}
75
/* Decode an RX write-back descriptor.
 * Returns dma_own while the DMA still owns it, discard_frame on errors
 * or non-last segments, good_frame otherwise. PTP/checksum/filter stat
 * counters in @x and @data are updated as a side effect.
 */
static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
				       struct dma_desc *p)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int rdes1 = p->des1;
	unsigned int rdes2 = p->des2;
	unsigned int rdes3 = p->des3;
	int message_type;
	int ret = good_frame;

	if (unlikely(rdes3 & RDES3_OWN))
		return dma_own;

	/* Verify rx error by looking at the last segment. */
	if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
		return discard_frame;

	if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
		if (unlikely(rdes3 & RDES3_GIANT_PACKET))
			stats->rx_length_errors++;
		if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
			x->rx_gmac_overflow++;

		if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
			x->rx_watchdog++;

		if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
			x->rx_mii++;

		if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
			x->rx_crc++;
			stats->rx_crc_errors++;
		}

		if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
			x->dribbling_bit++;

		ret = discard_frame;
	}

	/* PTP message type is in RDES1[11:8].
	 * NOTE(review): the mask used here is the extended-descriptor
	 * ERDES4_MSG_TYPE_MASK rather than RDES1_PTP_MSG_TYPE_MASK —
	 * presumably the same bit span; confirm against the databook.
	 */
	message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;

	if (rdes1 & RDES1_IP_HDR_ERROR)
		x->ip_hdr_err++;
	if (rdes1 & RDES1_IP_CSUM_BYPASSED)
		x->ip_csum_bypassed++;
	if (rdes1 & RDES1_IPV4_HEADER)
		x->ipv4_pkt_rcvd++;
	if (rdes1 & RDES1_IPV6_HEADER)
		x->ipv6_pkt_rcvd++;
	if (message_type == RDES_EXT_SYNC)
		x->rx_msg_type_sync++;
	else if (message_type == RDES_EXT_FOLLOW_UP)
		x->rx_msg_type_follow_up++;
	else if (message_type == RDES_EXT_DELAY_REQ)
		x->rx_msg_type_delay_req++;
	else if (message_type == RDES_EXT_DELAY_RESP)
		x->rx_msg_type_delay_resp++;
	else if (message_type == RDES_EXT_PDELAY_REQ)
		x->rx_msg_type_pdelay_req++;
	else if (message_type == RDES_EXT_PDELAY_RESP)
		x->rx_msg_type_pdelay_resp++;
	else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
		x->rx_msg_type_pdelay_follow_up++;
	else
		x->rx_msg_type_ext_no_ptp++;

	if (rdes1 & RDES1_PTP_PACKET_TYPE)
		x->ptp_frame_type++;
	if (rdes1 & RDES1_PTP_VER)
		x->ptp_ver++;
	if (rdes1 & RDES1_TIMESTAMP_DROPPED)
		x->timestamp_dropped++;

	if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}

	if (rdes2 & RDES2_L3_FILTER_MATCH)
		x->l3_filter_match++;
	if (rdes2 & RDES2_L4_FILTER_MATCH)
		x->l4_filter_match++;
	if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
	    >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
		x->l3_l4_filter_no_match++;

	return ret;
}
169
170static int dwmac4_rd_get_tx_len(struct dma_desc *p)
171{
172 return (p->des2 & TDES2_BUFFER1_SIZE_MASK);
173}
174
175static int dwmac4_get_tx_owner(struct dma_desc *p)
176{
177 return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT;
178}
179
180static void dwmac4_set_tx_owner(struct dma_desc *p)
181{
182 p->des3 |= TDES3_OWN;
183}
184
185static void dwmac4_set_rx_owner(struct dma_desc *p)
186{
187 p->des3 |= RDES3_OWN;
188}
189
190static int dwmac4_get_tx_ls(struct dma_desc *p)
191{
192 return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT;
193}
194
195static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
196{
197 return (p->des3 & RDES3_PACKET_SIZE_MASK);
198}
199
200static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
201{
202 p->des2 |= TDES2_TIMESTAMP_ENABLE;
203}
204
205static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
206{
207 return (p->des3 & TDES3_TIMESTAMP_STATUS)
208 >> TDES3_TIMESTAMP_STATUS_SHIFT;
209}
210
211/* NOTE: For RX CTX bit has to be checked before
212 * HAVE a specific function for TX and another one for RX
213 */
214static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
215{
216 struct dma_desc *p = (struct dma_desc *)desc;
217 u64 ns;
218
219 ns = p->des0;
220 /* convert high/sec time stamp value to nanosecond */
221 ns += p->des1 * 1000000000ULL;
222
223 return ns;
224}
225
226static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats)
227{
228 struct dma_desc *p = (struct dma_desc *)desc;
229
230 return (p->des1 & RDES1_TIMESTAMP_AVAILABLE)
231 >> RDES1_TIMESTAMP_AVAILABLE_SHIFT;
232}
233
/* Initialize an RX descriptor (read format): mark buffer 1 valid, set
 * OWN, and optionally enable the completion interrupt.
 * mode and end are unused: GMAC4 only supports ring mode and wraps at
 * the programmed ring length.
 */
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				   int mode, int end)
{
	p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;

	if (!disable_rx_ic)
		p->des3 |= RDES3_INT_ON_COMPLETION_EN;
}
242
/* Clear a TX descriptor; mode and end are unused (ring mode only) */
static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}
250
251static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
252 bool csum_flag, int mode, bool tx_own,
253 bool ls)
254{
255 unsigned int tdes3 = p->des3;
256
257 if (unlikely(len > BUF_SIZE_16KiB)) {
258 p->des2 |= (((len - BUF_SIZE_16KiB) <<
259 TDES2_BUFFER2_SIZE_MASK_SHIFT)
260 & TDES2_BUFFER2_SIZE_MASK)
261 | (BUF_SIZE_16KiB & TDES2_BUFFER1_SIZE_MASK);
262 } else {
263 p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
264 }
265
266 if (is_fs)
267 tdes3 |= TDES3_FIRST_DESCRIPTOR;
268 else
269 tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
270
271 if (likely(csum_flag))
272 tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
273 else
274 tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
275
276 if (ls)
277 tdes3 |= TDES3_LAST_DESCRIPTOR;
278 else
279 tdes3 &= ~TDES3_LAST_DESCRIPTOR;
280
281 /* Finally set the OWN bit. Later the DMA will start! */
282 if (tx_own)
283 tdes3 |= TDES3_OWN;
284
285 if (is_fs & tx_own)
286 /* When the own bit, for the first frame, has to be set, all
287 * descriptors for the same frame has to be set before, to
288 * avoid race condition.
289 */
290 wmb();
291
292 p->des3 = tdes3;
293}
294
295static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
296 int len1, int len2, bool tx_own,
297 bool ls, unsigned int tcphdrlen,
298 unsigned int tcppayloadlen)
299{
300 unsigned int tdes3 = p->des3;
301
302 if (len1)
303 p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK);
304
305 if (len2)
306 p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
307 & TDES2_BUFFER2_SIZE_MASK;
308
309 if (is_fs) {
310 tdes3 |= TDES3_FIRST_DESCRIPTOR |
311 TDES3_TCP_SEGMENTATION_ENABLE |
312 ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
313 TDES3_SLOT_NUMBER_MASK) |
314 ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
315 } else {
316 tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
317 }
318
319 if (ls)
320 tdes3 |= TDES3_LAST_DESCRIPTOR;
321 else
322 tdes3 &= ~TDES3_LAST_DESCRIPTOR;
323
324 /* Finally set the OWN bit. Later the DMA will start! */
325 if (tx_own)
326 tdes3 |= TDES3_OWN;
327
328 if (is_fs & tx_own)
329 /* When the own bit, for the first frame, has to be set, all
330 * descriptors for the same frame has to be set before, to
331 * avoid race condition.
332 */
333 wmb();
334
335 p->des3 = tdes3;
336}
337
/* Wipe the control/status words so the TX descriptor can be reused;
 * mode is unused (ring mode only).
 */
static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
{
	p->des2 = 0;
	p->des3 = 0;
}
343
344static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
345{
346 p->des2 |= TDES2_INTERRUPT_ON_COMPLETION;
347}
348
349static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
350{
351 struct dma_desc *p = (struct dma_desc *)head;
352 int i;
353
354 pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
355
356 for (i = 0; i < size; i++) {
357 if (p->des0)
358 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
359 i, (unsigned int)virt_to_phys(p),
360 p->des0, p->des1, p->des2, p->des3);
361 p++;
362 }
363}
364
/* Build a TX context descriptor carrying the TSO MSS value (TCMSSV) */
static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = mss;
	p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV;
}
372
/* Descriptor callbacks for DWMAC 4.xx (ring mode, write-back format) */
const struct stmmac_desc_ops dwmac4_desc_ops = {
	.tx_status = dwmac4_wrback_get_tx_status,
	.rx_status = dwmac4_wrback_get_rx_status,
	.get_tx_len = dwmac4_rd_get_tx_len,
	.get_tx_owner = dwmac4_get_tx_owner,
	.set_tx_owner = dwmac4_set_tx_owner,
	.set_rx_owner = dwmac4_set_rx_owner,
	.get_tx_ls = dwmac4_get_tx_ls,
	.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
	.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
	.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
	.get_timestamp = dwmac4_wrback_get_timestamp,
	.get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status,
	.set_tx_ic = dwmac4_rd_set_tx_ic,
	.prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
	.prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
	.release_tx_desc = dwmac4_release_tx_desc,
	.init_rx_desc = dwmac4_rd_init_rx_desc,
	.init_tx_desc = dwmac4_rd_init_tx_desc,
	.display_ring = dwmac4_display_ring,
	.set_mss = dwmac4_set_mss_ctxt,
};
395
/* GMAC4 works in ring mode natively: no chain/ring mode hooks needed */
const struct stmmac_mode_ops dwmac4_ring_mode_ops = { };
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
new file mode 100644
index 000000000000..0902a2edeaa9
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -0,0 +1,129 @@
1/*
2 * Header File to describe the DMA descriptors and related definitions specific
3 * for DesignWare databook 4.xx.
4 *
5 * Copyright (C) 2015 STMicroelectronics Ltd
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * Author: Alexandre Torgue <alexandre.torgue@st.com>
12 */
13
14#ifndef __DWMAC4_DESCS_H__
15#define __DWMAC4_DESCS_H__
16
17#include <linux/bitops.h>
18
19/* Normal transmit descriptor defines (without split feature) */
20
21/* TDES2 (read format) */
22#define TDES2_BUFFER1_SIZE_MASK GENMASK(13, 0)
23#define TDES2_VLAN_TAG_MASK GENMASK(15, 14)
24#define TDES2_BUFFER2_SIZE_MASK GENMASK(29, 16)
25#define TDES2_BUFFER2_SIZE_MASK_SHIFT 16
26#define TDES2_TIMESTAMP_ENABLE BIT(30)
27#define TDES2_INTERRUPT_ON_COMPLETION BIT(31)
28
29/* TDES3 (read format) */
30#define TDES3_PACKET_SIZE_MASK GENMASK(14, 0)
31#define TDES3_CHECKSUM_INSERTION_MASK GENMASK(17, 16)
32#define TDES3_CHECKSUM_INSERTION_SHIFT 16
33#define TDES3_TCP_PKT_PAYLOAD_MASK GENMASK(17, 0)
34#define TDES3_TCP_SEGMENTATION_ENABLE BIT(18)
35#define TDES3_HDR_LEN_SHIFT 19
36#define TDES3_SLOT_NUMBER_MASK GENMASK(22, 19)
37#define TDES3_SA_INSERT_CTRL_MASK GENMASK(25, 23)
38#define TDES3_CRC_PAD_CTRL_MASK GENMASK(27, 26)
39
40/* TDES3 (write back format) */
41#define TDES3_IP_HDR_ERROR BIT(0)
42#define TDES3_DEFERRED BIT(1)
43#define TDES3_UNDERFLOW_ERROR BIT(2)
44#define TDES3_EXCESSIVE_DEFERRAL BIT(3)
45#define TDES3_COLLISION_COUNT_MASK GENMASK(7, 4)
46#define TDES3_COLLISION_COUNT_SHIFT 4
47#define TDES3_EXCESSIVE_COLLISION BIT(8)
48#define TDES3_LATE_COLLISION BIT(9)
49#define TDES3_NO_CARRIER BIT(10)
50#define TDES3_LOSS_CARRIER BIT(11)
51#define TDES3_PAYLOAD_ERROR BIT(12)
52#define TDES3_PACKET_FLUSHED BIT(13)
53#define TDES3_JABBER_TIMEOUT BIT(14)
54#define TDES3_ERROR_SUMMARY BIT(15)
55#define TDES3_TIMESTAMP_STATUS BIT(17)
56#define TDES3_TIMESTAMP_STATUS_SHIFT 17
57
58/* TDES3 context */
59#define TDES3_CTXT_TCMSSV BIT(26)
60
61/* TDES3 Common */
62#define TDES3_LAST_DESCRIPTOR BIT(28)
63#define TDES3_LAST_DESCRIPTOR_SHIFT 28
64#define TDES3_FIRST_DESCRIPTOR BIT(29)
65#define TDES3_CONTEXT_TYPE BIT(30)
66
67/* TDS3 use for both format (read and write back) */
68#define TDES3_OWN BIT(31)
69#define TDES3_OWN_SHIFT 31
70
71/* Normal receive descriptor defines (without split feature) */
72
73/* RDES0 (write back format) */
74#define RDES0_VLAN_TAG_MASK GENMASK(15, 0)
75
76/* RDES1 (write back format) */
77#define RDES1_IP_PAYLOAD_TYPE_MASK GENMASK(2, 0)
78#define RDES1_IP_HDR_ERROR BIT(3)
79#define RDES1_IPV4_HEADER BIT(4)
80#define RDES1_IPV6_HEADER BIT(5)
81#define RDES1_IP_CSUM_BYPASSED BIT(6)
82#define RDES1_IP_CSUM_ERROR BIT(7)
83#define RDES1_PTP_MSG_TYPE_MASK GENMASK(11, 8)
84#define RDES1_PTP_PACKET_TYPE BIT(12)
85#define RDES1_PTP_VER BIT(13)
86#define RDES1_TIMESTAMP_AVAILABLE BIT(14)
87#define RDES1_TIMESTAMP_AVAILABLE_SHIFT 14
88#define RDES1_TIMESTAMP_DROPPED BIT(15)
89#define RDES1_IP_TYPE1_CSUM_MASK GENMASK(31, 16)
90
91/* RDES2 (write back format) */
92#define RDES2_L3_L4_HEADER_SIZE_MASK GENMASK(9, 0)
93#define RDES2_VLAN_FILTER_STATUS BIT(15)
94#define RDES2_SA_FILTER_FAIL BIT(16)
95#define RDES2_DA_FILTER_FAIL BIT(17)
96#define RDES2_HASH_FILTER_STATUS BIT(18)
97#define RDES2_MAC_ADDR_MATCH_MASK GENMASK(26, 19)
98#define RDES2_HASH_VALUE_MATCH_MASK GENMASK(26, 19)
99#define RDES2_L3_FILTER_MATCH BIT(27)
100#define RDES2_L4_FILTER_MATCH BIT(28)
101#define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26)
102#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26
103
104/* RDES3 (write back format) */
105#define RDES3_PACKET_SIZE_MASK GENMASK(14, 0)
106#define RDES3_ERROR_SUMMARY BIT(15)
107#define RDES3_PACKET_LEN_TYPE_MASK GENMASK(18, 16)
108#define RDES3_DRIBBLE_ERROR BIT(19)
109#define RDES3_RECEIVE_ERROR BIT(20)
110#define RDES3_OVERFLOW_ERROR BIT(21)
111#define RDES3_RECEIVE_WATCHDOG BIT(22)
112#define RDES3_GIANT_PACKET BIT(23)
113#define RDES3_CRC_ERROR BIT(24)
114#define RDES3_RDES0_VALID BIT(25)
115#define RDES3_RDES1_VALID BIT(26)
116#define RDES3_RDES2_VALID BIT(27)
117#define RDES3_LAST_DESCRIPTOR BIT(28)
118#define RDES3_FIRST_DESCRIPTOR BIT(29)
119#define RDES3_CONTEXT_DESCRIPTOR BIT(30)
120
121/* RDES3 (read format) */
122#define RDES3_BUFFER1_VALID_ADDR BIT(24)
123#define RDES3_BUFFER2_VALID_ADDR BIT(25)
124#define RDES3_INT_ON_COMPLETION_EN BIT(30)
125
126/* TDS3 use for both format (read and write back) */
127#define RDES3_OWN BIT(31)
128
129#endif /* __DWMAC4_DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
new file mode 100644
index 000000000000..116151cd6a95
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -0,0 +1,354 @@
1/*
2 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
3 * DWC Ether MAC version 4.xx has been used for developing this code.
4 *
5 * This contains the functions to handle the dma.
6 *
7 * Copyright (C) 2015 STMicroelectronics Ltd
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * Author: Alexandre Torgue <alexandre.torgue@st.com>
14 */
15
16#include <linux/io.h>
17#include "dwmac4.h"
18#include "dwmac4_dma.h"
19
20static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
21{
22 u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
23 int i;
24
25 pr_info("dwmac4: Master AXI performs %s burst length\n",
26 (value & DMA_SYS_BUS_FB) ? "fixed" : "any");
27
28 if (axi->axi_lpi_en)
29 value |= DMA_AXI_EN_LPI;
30 if (axi->axi_xit_frm)
31 value |= DMA_AXI_LPI_XIT_FRM;
32
33 value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
34 DMA_AXI_WR_OSR_LMT_SHIFT;
35
36 value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
37 DMA_AXI_RD_OSR_LMT_SHIFT;
38
39 /* Depending on the UNDEF bit the Master AXI will perform any burst
40 * length according to the BLEN programmed (by default all BLEN are
41 * set).
42 */
43 for (i = 0; i < AXI_BLEN; i++) {
44 switch (axi->axi_blen[i]) {
45 case 256:
46 value |= DMA_AXI_BLEN256;
47 break;
48 case 128:
49 value |= DMA_AXI_BLEN128;
50 break;
51 case 64:
52 value |= DMA_AXI_BLEN64;
53 break;
54 case 32:
55 value |= DMA_AXI_BLEN32;
56 break;
57 case 16:
58 value |= DMA_AXI_BLEN16;
59 break;
60 case 8:
61 value |= DMA_AXI_BLEN8;
62 break;
63 case 4:
64 value |= DMA_AXI_BLEN4;
65 break;
66 }
67 }
68
69 writel(value, ioaddr + DMA_SYS_BUS_MODE);
70}
71
72static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl,
73 u32 dma_tx_phy, u32 dma_rx_phy,
74 u32 channel)
75{
76 u32 value;
77
78 /* set PBL for each channels. Currently we affect same configuration
79 * on each channel
80 */
81 value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
82 value = value | DMA_BUS_MODE_PBL;
83 writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
84
85 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
86 value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT);
87 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
88
89 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
90 value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT);
91 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
92
93 /* Mask interrupts by writing to CSR7 */
94 writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
95
96 writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
97 writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
98}
99
100static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
101 int aal, u32 dma_tx, u32 dma_rx, int atds)
102{
103 u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
104 int i;
105
106 /* Set the Fixed burst mode */
107 if (fb)
108 value |= DMA_SYS_BUS_FB;
109
110 /* Mixed Burst has no effect when fb is set */
111 if (mb)
112 value |= DMA_SYS_BUS_MB;
113
114 if (aal)
115 value |= DMA_SYS_BUS_AAL;
116
117 writel(value, ioaddr + DMA_SYS_BUS_MODE);
118
119 for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
120 dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i);
121}
122
123static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
124{
125 pr_debug(" Channel %d\n", channel);
126 pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0,
127 readl(ioaddr + DMA_CHAN_CONTROL(channel)));
128 pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4,
129 readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)));
130 pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8,
131 readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)));
132 pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14,
133 readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)));
134 pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c,
135 readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)));
136 pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20,
137 readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)));
138 pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28,
139 readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)));
140 pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c,
141 readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)));
142 pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30,
143 readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)));
144 pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34,
145 readl(ioaddr + DMA_CHAN_INTR_ENA(channel)));
146 pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38,
147 readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)));
148 pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c,
149 readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)));
150 pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44,
151 readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)));
152 pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c,
153 readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)));
154 pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54,
155 readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)));
156 pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c,
157 readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)));
158 pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60,
159 readl(ioaddr + DMA_CHAN_STATUS(channel)));
160}
161
162static void dwmac4_dump_dma_regs(void __iomem *ioaddr)
163{
164 int i;
165
166 pr_debug(" GMAC4 DMA registers\n");
167
168 for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
169 _dwmac4_dump_dma_regs(ioaddr, i);
170}
171
172static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
173{
174 int i;
175
176 for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
177 writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
178}
179
180static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
181 int rxmode, u32 channel)
182{
183 u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
184
185 /* Following code only done for channel 0, other channels not yet
186 * supported.
187 */
188 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
189
190 if (txmode == SF_DMA_MODE) {
191 pr_debug("GMAC: enable TX store and forward mode\n");
192 /* Transmit COE type 2 cannot be done in cut-through mode. */
193 mtl_tx_op |= MTL_OP_MODE_TSF;
194 } else {
195 pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
196 mtl_tx_op &= ~MTL_OP_MODE_TSF;
197 mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
198 /* Set the transmit threshold */
199 if (txmode <= 32)
200 mtl_tx_op |= MTL_OP_MODE_TTC_32;
201 else if (txmode <= 64)
202 mtl_tx_op |= MTL_OP_MODE_TTC_64;
203 else if (txmode <= 96)
204 mtl_tx_op |= MTL_OP_MODE_TTC_96;
205 else if (txmode <= 128)
206 mtl_tx_op |= MTL_OP_MODE_TTC_128;
207 else if (txmode <= 192)
208 mtl_tx_op |= MTL_OP_MODE_TTC_192;
209 else if (txmode <= 256)
210 mtl_tx_op |= MTL_OP_MODE_TTC_256;
211 else if (txmode <= 384)
212 mtl_tx_op |= MTL_OP_MODE_TTC_384;
213 else
214 mtl_tx_op |= MTL_OP_MODE_TTC_512;
215 }
216
217 writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
218
219 mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
220
221 if (rxmode == SF_DMA_MODE) {
222 pr_debug("GMAC: enable RX store and forward mode\n");
223 mtl_rx_op |= MTL_OP_MODE_RSF;
224 } else {
225 pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
226 mtl_rx_op &= ~MTL_OP_MODE_RSF;
227 mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
228 if (rxmode <= 32)
229 mtl_rx_op |= MTL_OP_MODE_RTC_32;
230 else if (rxmode <= 64)
231 mtl_rx_op |= MTL_OP_MODE_RTC_64;
232 else if (rxmode <= 96)
233 mtl_rx_op |= MTL_OP_MODE_RTC_96;
234 else
235 mtl_rx_op |= MTL_OP_MODE_RTC_128;
236 }
237
238 writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
239
240 /* Enable MTL RX overflow */
241 mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
242 writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
243 ioaddr + MTL_CHAN_INT_CTRL(channel));
244}
245
246static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
247 int rxmode, int rxfifosz)
248{
249 /* Only Channel 0 is actually configured and used */
250 dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
251}
252
253static void dwmac4_get_hw_feature(void __iomem *ioaddr,
254 struct dma_features *dma_cap)
255{
256 u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
257
258 /* MAC HW feature0 */
259 dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
260 dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
261 dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
262 dma_cap->hash_filter = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
263 dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
264 dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
265 dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
266 dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
267 dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
268 /* MMC */
269 dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
270 /* IEEE 1588-2008 */
271 dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
272 /* 802.3az - Energy-Efficient Ethernet (EEE) */
273 dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
274 /* TX and RX csum */
275 dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
276 dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
277
278 /* MAC HW feature1 */
279 hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
280 dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
281 dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
282 /* MAC HW feature2 */
283 hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
284 /* TX and RX number of channels */
285 dma_cap->number_rx_channel =
286 ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
287 dma_cap->number_tx_channel =
288 ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
289
290 /* IEEE 1588-2002 */
291 dma_cap->time_stamp = 0;
292}
293
294/* Enable/disable TSO feature and set MSS */
295static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
296{
297 u32 value;
298
299 if (en) {
300 /* enable TSO */
301 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
302 writel(value | DMA_CONTROL_TSE,
303 ioaddr + DMA_CHAN_TX_CONTROL(chan));
304 } else {
305 /* enable TSO */
306 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
307 writel(value & ~DMA_CONTROL_TSE,
308 ioaddr + DMA_CHAN_TX_CONTROL(chan));
309 }
310}
311
312const struct stmmac_dma_ops dwmac4_dma_ops = {
313 .reset = dwmac4_dma_reset,
314 .init = dwmac4_dma_init,
315 .axi = dwmac4_dma_axi,
316 .dump_regs = dwmac4_dump_dma_regs,
317 .dma_mode = dwmac4_dma_operation_mode,
318 .enable_dma_irq = dwmac4_enable_dma_irq,
319 .disable_dma_irq = dwmac4_disable_dma_irq,
320 .start_tx = dwmac4_dma_start_tx,
321 .stop_tx = dwmac4_dma_stop_tx,
322 .start_rx = dwmac4_dma_start_rx,
323 .stop_rx = dwmac4_dma_stop_rx,
324 .dma_interrupt = dwmac4_dma_interrupt,
325 .get_hw_feature = dwmac4_get_hw_feature,
326 .rx_watchdog = dwmac4_rx_watchdog,
327 .set_rx_ring_len = dwmac4_set_rx_ring_len,
328 .set_tx_ring_len = dwmac4_set_tx_ring_len,
329 .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
330 .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
331 .enable_tso = dwmac4_enable_tso,
332};
333
334const struct stmmac_dma_ops dwmac410_dma_ops = {
335 .reset = dwmac4_dma_reset,
336 .init = dwmac4_dma_init,
337 .axi = dwmac4_dma_axi,
338 .dump_regs = dwmac4_dump_dma_regs,
339 .dma_mode = dwmac4_dma_operation_mode,
340 .enable_dma_irq = dwmac410_enable_dma_irq,
341 .disable_dma_irq = dwmac4_disable_dma_irq,
342 .start_tx = dwmac4_dma_start_tx,
343 .stop_tx = dwmac4_dma_stop_tx,
344 .start_rx = dwmac4_dma_start_rx,
345 .stop_rx = dwmac4_dma_stop_rx,
346 .dma_interrupt = dwmac4_dma_interrupt,
347 .get_hw_feature = dwmac4_get_hw_feature,
348 .rx_watchdog = dwmac4_rx_watchdog,
349 .set_rx_ring_len = dwmac4_set_rx_ring_len,
350 .set_tx_ring_len = dwmac4_set_tx_ring_len,
351 .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
352 .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
353 .enable_tso = dwmac4_enable_tso,
354};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
new file mode 100644
index 000000000000..1b06df749e2b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -0,0 +1,202 @@
1/*
2 * DWMAC4 DMA Header file.
3 *
4 *
5 * Copyright (C) 2007-2015 STMicroelectronics Ltd
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * Author: Alexandre Torgue <alexandre.torgue@st.com>
12 */
13
14#ifndef __DWMAC4_DMA_H__
15#define __DWMAC4_DMA_H__
16
17/* Define the max channel number used for tx (also rx).
18 * dwmac4 accepts up to 8 channels for TX (and also 8 channels for RX).
19 */
20#define DMA_CHANNEL_NB_MAX 1
21
22#define DMA_BUS_MODE 0x00001000
23#define DMA_SYS_BUS_MODE 0x00001004
24#define DMA_STATUS 0x00001008
25#define DMA_DEBUG_STATUS_0 0x0000100c
26#define DMA_DEBUG_STATUS_1 0x00001010
27#define DMA_DEBUG_STATUS_2 0x00001014
28#define DMA_AXI_BUS_MODE 0x00001028
29
30/* DMA Bus Mode bitmap */
31#define DMA_BUS_MODE_SFT_RESET BIT(0)
32
33/* DMA SYS Bus Mode bitmap */
34#define DMA_BUS_MODE_SPH BIT(24)
35#define DMA_BUS_MODE_PBL BIT(16)
36#define DMA_BUS_MODE_PBL_SHIFT 16
37#define DMA_BUS_MODE_RPBL_SHIFT 16
38#define DMA_BUS_MODE_MB BIT(14)
39#define DMA_BUS_MODE_FB BIT(0)
40
41/* DMA Interrupt top status */
42#define DMA_STATUS_MAC BIT(17)
43#define DMA_STATUS_MTL BIT(16)
44#define DMA_STATUS_CHAN7 BIT(7)
45#define DMA_STATUS_CHAN6 BIT(6)
46#define DMA_STATUS_CHAN5 BIT(5)
47#define DMA_STATUS_CHAN4 BIT(4)
48#define DMA_STATUS_CHAN3 BIT(3)
49#define DMA_STATUS_CHAN2 BIT(2)
50#define DMA_STATUS_CHAN1 BIT(1)
51#define DMA_STATUS_CHAN0 BIT(0)
52
53/* DMA debug status bitmap */
54#define DMA_DEBUG_STATUS_TS_MASK 0xf
55#define DMA_DEBUG_STATUS_RS_MASK 0xf
56
57/* DMA AXI bitmap */
58#define DMA_AXI_EN_LPI BIT(31)
59#define DMA_AXI_LPI_XIT_FRM BIT(30)
60#define DMA_AXI_WR_OSR_LMT GENMASK(27, 24)
61#define DMA_AXI_WR_OSR_LMT_SHIFT 24
62#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16)
63#define DMA_AXI_RD_OSR_LMT_SHIFT 16
64
65#define DMA_AXI_OSR_MAX 0xf
66#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
67 (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
68
69#define DMA_SYS_BUS_MB BIT(14)
70#define DMA_AXI_1KBBE BIT(13)
71#define DMA_SYS_BUS_AAL BIT(12)
72#define DMA_AXI_BLEN256 BIT(7)
73#define DMA_AXI_BLEN128 BIT(6)
74#define DMA_AXI_BLEN64 BIT(5)
75#define DMA_AXI_BLEN32 BIT(4)
76#define DMA_AXI_BLEN16 BIT(3)
77#define DMA_AXI_BLEN8 BIT(2)
78#define DMA_AXI_BLEN4 BIT(1)
79#define DMA_SYS_BUS_FB BIT(0)
80
81#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
82 DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
83 DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
84 DMA_AXI_BLEN4)
85
86#define DMA_AXI_BURST_LEN_MASK 0x000000FE
87
88/* Following DMA defines are channel oriented */
89#define DMA_CHAN_BASE_ADDR 0x00001100
90#define DMA_CHAN_BASE_OFFSET 0x80
91#define DMA_CHANX_BASE_ADDR(x) (DMA_CHAN_BASE_ADDR + \
92 (x * DMA_CHAN_BASE_OFFSET))
93#define DMA_CHAN_REG_NUMBER 17
94
95#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x)
96#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4)
97#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8)
98#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14)
99#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c)
100#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20)
101#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28)
102#define DMA_CHAN_TX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x2c)
103#define DMA_CHAN_RX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x30)
104#define DMA_CHAN_INTR_ENA(x) (DMA_CHANX_BASE_ADDR(x) + 0x34)
105#define DMA_CHAN_RX_WATCHDOG(x) (DMA_CHANX_BASE_ADDR(x) + 0x38)
106#define DMA_CHAN_SLOT_CTRL_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x3c)
107#define DMA_CHAN_CUR_TX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x44)
108#define DMA_CHAN_CUR_RX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x4c)
109#define DMA_CHAN_CUR_TX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x54)
110#define DMA_CHAN_CUR_RX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x5c)
111#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60)
112
113/* DMA Control X */
114#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
115
116/* DMA Tx Channel X Control register defines */
117#define DMA_CONTROL_TSE BIT(12)
118#define DMA_CONTROL_OSP BIT(4)
119#define DMA_CONTROL_ST BIT(0)
120
121/* DMA Rx Channel X Control register defines */
122#define DMA_CONTROL_SR BIT(0)
123
124/* Interrupt status per channel */
125#define DMA_CHAN_STATUS_REB GENMASK(21, 19)
126#define DMA_CHAN_STATUS_REB_SHIFT 19
127#define DMA_CHAN_STATUS_TEB GENMASK(18, 16)
128#define DMA_CHAN_STATUS_TEB_SHIFT 16
129#define DMA_CHAN_STATUS_NIS BIT(15)
130#define DMA_CHAN_STATUS_AIS BIT(14)
131#define DMA_CHAN_STATUS_CDE BIT(13)
132#define DMA_CHAN_STATUS_FBE BIT(12)
133#define DMA_CHAN_STATUS_ERI BIT(11)
134#define DMA_CHAN_STATUS_ETI BIT(10)
135#define DMA_CHAN_STATUS_RWT BIT(9)
136#define DMA_CHAN_STATUS_RPS BIT(8)
137#define DMA_CHAN_STATUS_RBU BIT(7)
138#define DMA_CHAN_STATUS_RI BIT(6)
139#define DMA_CHAN_STATUS_TBU BIT(2)
140#define DMA_CHAN_STATUS_TPS BIT(1)
141#define DMA_CHAN_STATUS_TI BIT(0)
142
143/* Interrupt enable bits per channel */
144#define DMA_CHAN_INTR_ENA_NIE BIT(16)
145#define DMA_CHAN_INTR_ENA_AIE BIT(15)
146#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15)
147#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14)
148#define DMA_CHAN_INTR_ENA_CDE BIT(13)
149#define DMA_CHAN_INTR_ENA_FBE BIT(12)
150#define DMA_CHAN_INTR_ENA_ERE BIT(11)
151#define DMA_CHAN_INTR_ENA_ETE BIT(10)
152#define DMA_CHAN_INTR_ENA_RWE BIT(9)
153#define DMA_CHAN_INTR_ENA_RSE BIT(8)
154#define DMA_CHAN_INTR_ENA_RBUE BIT(7)
155#define DMA_CHAN_INTR_ENA_RIE BIT(6)
156#define DMA_CHAN_INTR_ENA_TBUE BIT(2)
157#define DMA_CHAN_INTR_ENA_TSE BIT(1)
158#define DMA_CHAN_INTR_ENA_TIE BIT(0)
159
160#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \
161 DMA_CHAN_INTR_ENA_RIE | \
162 DMA_CHAN_INTR_ENA_TIE)
163
164#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \
165 DMA_CHAN_INTR_ENA_FBE)
166/* DMA default interrupt mask for 4.00 */
167#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \
168 DMA_CHAN_INTR_ABNORMAL)
169
170#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \
171 DMA_CHAN_INTR_ENA_RIE | \
172 DMA_CHAN_INTR_ENA_TIE)
173
174#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \
175 DMA_CHAN_INTR_ENA_FBE)
176/* DMA default interrupt mask for 4.10a */
177#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
178 DMA_CHAN_INTR_ABNORMAL_4_10)
179
180/* channel 0 specific fields */
181#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12)
182#define DMA_CHAN0_DBG_STAT_TPS_SHIFT 12
183#define DMA_CHAN0_DBG_STAT_RPS GENMASK(11, 8)
184#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
185
186int dwmac4_dma_reset(void __iomem *ioaddr);
187void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
188void dwmac4_enable_dma_irq(void __iomem *ioaddr);
189void dwmac410_enable_dma_irq(void __iomem *ioaddr);
190void dwmac4_disable_dma_irq(void __iomem *ioaddr);
191void dwmac4_dma_start_tx(void __iomem *ioaddr);
192void dwmac4_dma_stop_tx(void __iomem *ioaddr);
193void dwmac4_dma_start_rx(void __iomem *ioaddr);
194void dwmac4_dma_stop_rx(void __iomem *ioaddr);
195int dwmac4_dma_interrupt(void __iomem *ioaddr,
196 struct stmmac_extra_stats *x);
197void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
198void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
199void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
200void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
201
202#endif /* __DWMAC4_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
new file mode 100644
index 000000000000..c7326d5b2f43
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -0,0 +1,225 @@
1/*
2 * Copyright (C) 2007-2015 STMicroelectronics Ltd
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * Author: Alexandre Torgue <alexandre.torgue@st.com>
9 */
10
11#include <linux/io.h>
12#include <linux/delay.h>
13#include "common.h"
14#include "dwmac4_dma.h"
15#include "dwmac4.h"
16
17int dwmac4_dma_reset(void __iomem *ioaddr)
18{
19 u32 value = readl(ioaddr + DMA_BUS_MODE);
20 int limit;
21
22 /* DMA SW reset */
23 value |= DMA_BUS_MODE_SFT_RESET;
24 writel(value, ioaddr + DMA_BUS_MODE);
25 limit = 10;
26 while (limit--) {
27 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
28 break;
29 mdelay(10);
30 }
31
32 if (limit < 0)
33 return -EBUSY;
34
35 return 0;
36}
37
38void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
39{
40 writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
41}
42
43void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
44{
45 writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
46}
47
48void dwmac4_dma_start_tx(void __iomem *ioaddr)
49{
50 u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
51
52 value |= DMA_CONTROL_ST;
53 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
54
55 value = readl(ioaddr + GMAC_CONFIG);
56 value |= GMAC_CONFIG_TE;
57 writel(value, ioaddr + GMAC_CONFIG);
58}
59
60void dwmac4_dma_stop_tx(void __iomem *ioaddr)
61{
62 u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
63
64 value &= ~DMA_CONTROL_ST;
65 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
66
67 value = readl(ioaddr + GMAC_CONFIG);
68 value &= ~GMAC_CONFIG_TE;
69 writel(value, ioaddr + GMAC_CONFIG);
70}
71
72void dwmac4_dma_start_rx(void __iomem *ioaddr)
73{
74 u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
75
76 value |= DMA_CONTROL_SR;
77
78 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
79
80 value = readl(ioaddr + GMAC_CONFIG);
81 value |= GMAC_CONFIG_RE;
82 writel(value, ioaddr + GMAC_CONFIG);
83}
84
85void dwmac4_dma_stop_rx(void __iomem *ioaddr)
86{
87 u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
88
89 value &= ~DMA_CONTROL_SR;
90 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
91
92 value = readl(ioaddr + GMAC_CONFIG);
93 value &= ~GMAC_CONFIG_RE;
94 writel(value, ioaddr + GMAC_CONFIG);
95}
96
97void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
98{
99 writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
100}
101
102void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
103{
104 writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
105}
106
107void dwmac4_enable_dma_irq(void __iomem *ioaddr)
108{
109 writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
110 DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
111}
112
113void dwmac410_enable_dma_irq(void __iomem *ioaddr)
114{
115 writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
116 ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
117}
118
119void dwmac4_disable_dma_irq(void __iomem *ioaddr)
120{
121 writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
122}
123
124int dwmac4_dma_interrupt(void __iomem *ioaddr,
125 struct stmmac_extra_stats *x)
126{
127 int ret = 0;
128
129 u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
130
131 /* ABNORMAL interrupts */
132 if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
133 if (unlikely(intr_status & DMA_CHAN_STATUS_RBU))
134 x->rx_buf_unav_irq++;
135 if (unlikely(intr_status & DMA_CHAN_STATUS_RPS))
136 x->rx_process_stopped_irq++;
137 if (unlikely(intr_status & DMA_CHAN_STATUS_RWT))
138 x->rx_watchdog_irq++;
139 if (unlikely(intr_status & DMA_CHAN_STATUS_ETI))
140 x->tx_early_irq++;
141 if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) {
142 x->tx_process_stopped_irq++;
143 ret = tx_hard_error;
144 }
145 if (unlikely(intr_status & DMA_CHAN_STATUS_FBE)) {
146 x->fatal_bus_error_irq++;
147 ret = tx_hard_error;
148 }
149 }
150 /* TX/RX NORMAL interrupts */
151 if (likely(intr_status & DMA_CHAN_STATUS_NIS)) {
152 x->normal_irq_n++;
153 if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
154 u32 value;
155
156 value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
157 /* to schedule NAPI on real RIE event. */
158 if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
159 x->rx_normal_irq_n++;
160 ret |= handle_rx;
161 }
162 }
163 if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
164 x->tx_normal_irq_n++;
165 ret |= handle_tx;
166 }
167 if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
168 x->rx_early_irq++;
169 }
170
171 /* Clear the interrupt by writing a logic 1 to the chanX interrupt
172 * status [21-0] expect reserved bits [5-3]
173 */
174 writel((intr_status & 0x3fffc7),
175 ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
176
177 return ret;
178}
179
180void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
181 unsigned int high, unsigned int low)
182{
183 unsigned long data;
184
185 data = (addr[5] << 8) | addr[4];
186 /* For MAC Addr registers se have to set the Address Enable (AE)
187 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
188 * is RO.
189 */
190 data |= (STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT);
191 writel(data | GMAC_HI_REG_AE, ioaddr + high);
192 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
193 writel(data, ioaddr + low);
194}
195
196/* Enable disable MAC RX/TX */
197void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable)
198{
199 u32 value = readl(ioaddr + GMAC_CONFIG);
200
201 if (enable)
202 value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE;
203 else
204 value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE);
205
206 writel(value, ioaddr + GMAC_CONFIG);
207}
208
209void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
210 unsigned int high, unsigned int low)
211{
212 unsigned int hi_addr, lo_addr;
213
214 /* Read the MAC address from the hardware */
215 hi_addr = readl(ioaddr + high);
216 lo_addr = readl(ioaddr + low);
217
218 /* Extract the MAC address from the high and low words */
219 addr[0] = lo_addr & 0xff;
220 addr[1] = (lo_addr >> 8) & 0xff;
221 addr[2] = (lo_addr >> 16) & 0xff;
222 addr[3] = (lo_addr >> 24) & 0xff;
223 addr[4] = hi_addr & 0xff;
224 addr[5] = (hi_addr >> 8) & 0xff;
225}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index cfb018c7c5eb..38f19c99cf59 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -411,6 +411,26 @@ static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
411 } 411 }
412} 412}
413 413
414static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
415{
416 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
417 int i;
418
419 pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");
420
421 for (i = 0; i < size; i++) {
422 u64 x;
423
424 x = *(u64 *)ep;
425 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
426 i, (unsigned int)virt_to_phys(ep),
427 (unsigned int)x, (unsigned int)(x >> 32),
428 ep->basic.des2, ep->basic.des3);
429 ep++;
430 }
431 pr_info("\n");
432}
433
414const struct stmmac_desc_ops enh_desc_ops = { 434const struct stmmac_desc_ops enh_desc_ops = {
415 .tx_status = enh_desc_get_tx_status, 435 .tx_status = enh_desc_get_tx_status,
416 .rx_status = enh_desc_get_rx_status, 436 .rx_status = enh_desc_get_rx_status,
@@ -430,4 +450,5 @@ const struct stmmac_desc_ops enh_desc_ops = {
430 .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status, 450 .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
431 .get_timestamp = enh_desc_get_timestamp, 451 .get_timestamp = enh_desc_get_timestamp,
432 .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, 452 .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
453 .display_ring = enh_desc_display_ring,
433}; 454};
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 192c2491330b..38a1a5603293 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -35,6 +35,10 @@
35 * current value.*/ 35 * current value.*/
36#define MMC_CNTRL_PRESET 0x10 36#define MMC_CNTRL_PRESET 0x10
37#define MMC_CNTRL_FULL_HALF_PRESET 0x20 37#define MMC_CNTRL_FULL_HALF_PRESET 0x20
38
39#define MMC_GMAC4_OFFSET 0x700
40#define MMC_GMAC3_X_OFFSET 0x100
41
38struct stmmac_counters { 42struct stmmac_counters {
39 unsigned int mmc_tx_octetcount_gb; 43 unsigned int mmc_tx_octetcount_gb;
40 unsigned int mmc_tx_framecount_gb; 44 unsigned int mmc_tx_framecount_gb;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 3f20bb1fe570..ce9aa792857b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -28,12 +28,12 @@
28 28
29/* MAC Management Counters register offset */ 29/* MAC Management Counters register offset */
30 30
31#define MMC_CNTRL 0x00000100 /* MMC Control */ 31#define MMC_CNTRL 0x00 /* MMC Control */
32#define MMC_RX_INTR 0x00000104 /* MMC RX Interrupt */ 32#define MMC_RX_INTR 0x04 /* MMC RX Interrupt */
33#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ 33#define MMC_TX_INTR 0x08 /* MMC TX Interrupt */
34#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ 34#define MMC_RX_INTR_MASK 0x0c /* MMC Interrupt Mask */
35#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ 35#define MMC_TX_INTR_MASK 0x10 /* MMC Interrupt Mask */
36#define MMC_DEFAULT_MASK 0xffffffff 36#define MMC_DEFAULT_MASK 0xffffffff
37 37
38/* MMC TX counter registers */ 38/* MMC TX counter registers */
39 39
@@ -41,115 +41,115 @@
41 * _GB register stands for good and bad frames 41 * _GB register stands for good and bad frames
42 * _G is for good only. 42 * _G is for good only.
43 */ 43 */
44#define MMC_TX_OCTETCOUNT_GB 0x00000114 44#define MMC_TX_OCTETCOUNT_GB 0x14
45#define MMC_TX_FRAMECOUNT_GB 0x00000118 45#define MMC_TX_FRAMECOUNT_GB 0x18
46#define MMC_TX_BROADCASTFRAME_G 0x0000011c 46#define MMC_TX_BROADCASTFRAME_G 0x1c
47#define MMC_TX_MULTICASTFRAME_G 0x00000120 47#define MMC_TX_MULTICASTFRAME_G 0x20
48#define MMC_TX_64_OCTETS_GB 0x00000124 48#define MMC_TX_64_OCTETS_GB 0x24
49#define MMC_TX_65_TO_127_OCTETS_GB 0x00000128 49#define MMC_TX_65_TO_127_OCTETS_GB 0x28
50#define MMC_TX_128_TO_255_OCTETS_GB 0x0000012c 50#define MMC_TX_128_TO_255_OCTETS_GB 0x2c
51#define MMC_TX_256_TO_511_OCTETS_GB 0x00000130 51#define MMC_TX_256_TO_511_OCTETS_GB 0x30
52#define MMC_TX_512_TO_1023_OCTETS_GB 0x00000134 52#define MMC_TX_512_TO_1023_OCTETS_GB 0x34
53#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x00000138 53#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x38
54#define MMC_TX_UNICAST_GB 0x0000013c 54#define MMC_TX_UNICAST_GB 0x3c
55#define MMC_TX_MULTICAST_GB 0x00000140 55#define MMC_TX_MULTICAST_GB 0x40
56#define MMC_TX_BROADCAST_GB 0x00000144 56#define MMC_TX_BROADCAST_GB 0x44
57#define MMC_TX_UNDERFLOW_ERROR 0x00000148 57#define MMC_TX_UNDERFLOW_ERROR 0x48
58#define MMC_TX_SINGLECOL_G 0x0000014c 58#define MMC_TX_SINGLECOL_G 0x4c
59#define MMC_TX_MULTICOL_G 0x00000150 59#define MMC_TX_MULTICOL_G 0x50
60#define MMC_TX_DEFERRED 0x00000154 60#define MMC_TX_DEFERRED 0x54
61#define MMC_TX_LATECOL 0x00000158 61#define MMC_TX_LATECOL 0x58
62#define MMC_TX_EXESSCOL 0x0000015c 62#define MMC_TX_EXESSCOL 0x5c
63#define MMC_TX_CARRIER_ERROR 0x00000160 63#define MMC_TX_CARRIER_ERROR 0x60
64#define MMC_TX_OCTETCOUNT_G 0x00000164 64#define MMC_TX_OCTETCOUNT_G 0x64
65#define MMC_TX_FRAMECOUNT_G 0x00000168 65#define MMC_TX_FRAMECOUNT_G 0x68
66#define MMC_TX_EXCESSDEF 0x0000016c 66#define MMC_TX_EXCESSDEF 0x6c
67#define MMC_TX_PAUSE_FRAME 0x00000170 67#define MMC_TX_PAUSE_FRAME 0x70
68#define MMC_TX_VLAN_FRAME_G 0x00000174 68#define MMC_TX_VLAN_FRAME_G 0x74
69 69
70/* MMC RX counter registers */ 70/* MMC RX counter registers */
71#define MMC_RX_FRAMECOUNT_GB 0x00000180 71#define MMC_RX_FRAMECOUNT_GB 0x80
72#define MMC_RX_OCTETCOUNT_GB 0x00000184 72#define MMC_RX_OCTETCOUNT_GB 0x84
73#define MMC_RX_OCTETCOUNT_G 0x00000188 73#define MMC_RX_OCTETCOUNT_G 0x88
74#define MMC_RX_BROADCASTFRAME_G 0x0000018c 74#define MMC_RX_BROADCASTFRAME_G 0x8c
75#define MMC_RX_MULTICASTFRAME_G 0x00000190 75#define MMC_RX_MULTICASTFRAME_G 0x90
76#define MMC_RX_CRC_ERROR 0x00000194 76#define MMC_RX_CRC_ERROR 0x94
77#define MMC_RX_ALIGN_ERROR 0x00000198 77#define MMC_RX_ALIGN_ERROR 0x98
78#define MMC_RX_RUN_ERROR 0x0000019C 78#define MMC_RX_RUN_ERROR 0x9C
79#define MMC_RX_JABBER_ERROR 0x000001A0 79#define MMC_RX_JABBER_ERROR 0xA0
80#define MMC_RX_UNDERSIZE_G 0x000001A4 80#define MMC_RX_UNDERSIZE_G 0xA4
81#define MMC_RX_OVERSIZE_G 0x000001A8 81#define MMC_RX_OVERSIZE_G 0xA8
82#define MMC_RX_64_OCTETS_GB 0x000001AC 82#define MMC_RX_64_OCTETS_GB 0xAC
83#define MMC_RX_65_TO_127_OCTETS_GB 0x000001b0 83#define MMC_RX_65_TO_127_OCTETS_GB 0xb0
84#define MMC_RX_128_TO_255_OCTETS_GB 0x000001b4 84#define MMC_RX_128_TO_255_OCTETS_GB 0xb4
85#define MMC_RX_256_TO_511_OCTETS_GB 0x000001b8 85#define MMC_RX_256_TO_511_OCTETS_GB 0xb8
86#define MMC_RX_512_TO_1023_OCTETS_GB 0x000001bc 86#define MMC_RX_512_TO_1023_OCTETS_GB 0xbc
87#define MMC_RX_1024_TO_MAX_OCTETS_GB 0x000001c0 87#define MMC_RX_1024_TO_MAX_OCTETS_GB 0xc0
88#define MMC_RX_UNICAST_G 0x000001c4 88#define MMC_RX_UNICAST_G 0xc4
89#define MMC_RX_LENGTH_ERROR 0x000001c8 89#define MMC_RX_LENGTH_ERROR 0xc8
90#define MMC_RX_AUTOFRANGETYPE 0x000001cc 90#define MMC_RX_AUTOFRANGETYPE 0xcc
91#define MMC_RX_PAUSE_FRAMES 0x000001d0 91#define MMC_RX_PAUSE_FRAMES 0xd0
92#define MMC_RX_FIFO_OVERFLOW 0x000001d4 92#define MMC_RX_FIFO_OVERFLOW 0xd4
93#define MMC_RX_VLAN_FRAMES_GB 0x000001d8 93#define MMC_RX_VLAN_FRAMES_GB 0xd8
94#define MMC_RX_WATCHDOG_ERROR 0x000001dc 94#define MMC_RX_WATCHDOG_ERROR 0xdc
95/* IPC*/ 95/* IPC*/
96#define MMC_RX_IPC_INTR_MASK 0x00000200 96#define MMC_RX_IPC_INTR_MASK 0x100
97#define MMC_RX_IPC_INTR 0x00000208 97#define MMC_RX_IPC_INTR 0x108
98/* IPv4*/ 98/* IPv4*/
99#define MMC_RX_IPV4_GD 0x00000210 99#define MMC_RX_IPV4_GD 0x110
100#define MMC_RX_IPV4_HDERR 0x00000214 100#define MMC_RX_IPV4_HDERR 0x114
101#define MMC_RX_IPV4_NOPAY 0x00000218 101#define MMC_RX_IPV4_NOPAY 0x118
102#define MMC_RX_IPV4_FRAG 0x0000021C 102#define MMC_RX_IPV4_FRAG 0x11C
103#define MMC_RX_IPV4_UDSBL 0x00000220 103#define MMC_RX_IPV4_UDSBL 0x120
104 104
105#define MMC_RX_IPV4_GD_OCTETS 0x00000250 105#define MMC_RX_IPV4_GD_OCTETS 0x150
106#define MMC_RX_IPV4_HDERR_OCTETS 0x00000254 106#define MMC_RX_IPV4_HDERR_OCTETS 0x154
107#define MMC_RX_IPV4_NOPAY_OCTETS 0x00000258 107#define MMC_RX_IPV4_NOPAY_OCTETS 0x158
108#define MMC_RX_IPV4_FRAG_OCTETS 0x0000025c 108#define MMC_RX_IPV4_FRAG_OCTETS 0x15c
109#define MMC_RX_IPV4_UDSBL_OCTETS 0x00000260 109#define MMC_RX_IPV4_UDSBL_OCTETS 0x160
110 110
111/* IPV6*/ 111/* IPV6*/
112#define MMC_RX_IPV6_GD_OCTETS 0x00000264 112#define MMC_RX_IPV6_GD_OCTETS 0x164
113#define MMC_RX_IPV6_HDERR_OCTETS 0x00000268 113#define MMC_RX_IPV6_HDERR_OCTETS 0x168
114#define MMC_RX_IPV6_NOPAY_OCTETS 0x0000026c 114#define MMC_RX_IPV6_NOPAY_OCTETS 0x16c
115 115
116#define MMC_RX_IPV6_GD 0x00000224 116#define MMC_RX_IPV6_GD 0x124
117#define MMC_RX_IPV6_HDERR 0x00000228 117#define MMC_RX_IPV6_HDERR 0x128
118#define MMC_RX_IPV6_NOPAY 0x0000022c 118#define MMC_RX_IPV6_NOPAY 0x12c
119 119
120/* Protocols*/ 120/* Protocols*/
121#define MMC_RX_UDP_GD 0x00000230 121#define MMC_RX_UDP_GD 0x130
122#define MMC_RX_UDP_ERR 0x00000234 122#define MMC_RX_UDP_ERR 0x134
123#define MMC_RX_TCP_GD 0x00000238 123#define MMC_RX_TCP_GD 0x138
124#define MMC_RX_TCP_ERR 0x0000023c 124#define MMC_RX_TCP_ERR 0x13c
125#define MMC_RX_ICMP_GD 0x00000240 125#define MMC_RX_ICMP_GD 0x140
126#define MMC_RX_ICMP_ERR 0x00000244 126#define MMC_RX_ICMP_ERR 0x144
127 127
128#define MMC_RX_UDP_GD_OCTETS 0x00000270 128#define MMC_RX_UDP_GD_OCTETS 0x170
129#define MMC_RX_UDP_ERR_OCTETS 0x00000274 129#define MMC_RX_UDP_ERR_OCTETS 0x174
130#define MMC_RX_TCP_GD_OCTETS 0x00000278 130#define MMC_RX_TCP_GD_OCTETS 0x178
131#define MMC_RX_TCP_ERR_OCTETS 0x0000027c 131#define MMC_RX_TCP_ERR_OCTETS 0x17c
132#define MMC_RX_ICMP_GD_OCTETS 0x00000280 132#define MMC_RX_ICMP_GD_OCTETS 0x180
133#define MMC_RX_ICMP_ERR_OCTETS 0x00000284 133#define MMC_RX_ICMP_ERR_OCTETS 0x184
134 134
135void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode) 135void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
136{ 136{
137 u32 value = readl(ioaddr + MMC_CNTRL); 137 u32 value = readl(mmcaddr + MMC_CNTRL);
138 138
139 value |= (mode & 0x3F); 139 value |= (mode & 0x3F);
140 140
141 writel(value, ioaddr + MMC_CNTRL); 141 writel(value, mmcaddr + MMC_CNTRL);
142 142
143 pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n", 143 pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
144 MMC_CNTRL, value); 144 MMC_CNTRL, value);
145} 145}
146 146
147/* To mask all all interrupts.*/ 147/* To mask all all interrupts.*/
148void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) 148void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
149{ 149{
150 writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); 150 writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
151 writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); 151 writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
152 writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK); 152 writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK);
153} 153}
154 154
155/* This reads the MAC core counters (if actaully supported). 155/* This reads the MAC core counters (if actaully supported).
@@ -157,111 +157,116 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
157 * counter after a read. So all the field of the mmc struct 157 * counter after a read. So all the field of the mmc struct
158 * have to be incremented. 158 * have to be incremented.
159 */ 159 */
160void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc) 160void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
161{ 161{
162 mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB); 162 mmc->mmc_tx_octetcount_gb += readl(mmcaddr + MMC_TX_OCTETCOUNT_GB);
163 mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB); 163 mmc->mmc_tx_framecount_gb += readl(mmcaddr + MMC_TX_FRAMECOUNT_GB);
164 mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G); 164 mmc->mmc_tx_broadcastframe_g += readl(mmcaddr +
165 mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G); 165 MMC_TX_BROADCASTFRAME_G);
166 mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB); 166 mmc->mmc_tx_multicastframe_g += readl(mmcaddr +
167 MMC_TX_MULTICASTFRAME_G);
168 mmc->mmc_tx_64_octets_gb += readl(mmcaddr + MMC_TX_64_OCTETS_GB);
167 mmc->mmc_tx_65_to_127_octets_gb += 169 mmc->mmc_tx_65_to_127_octets_gb +=
168 readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB); 170 readl(mmcaddr + MMC_TX_65_TO_127_OCTETS_GB);
169 mmc->mmc_tx_128_to_255_octets_gb += 171 mmc->mmc_tx_128_to_255_octets_gb +=
170 readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB); 172 readl(mmcaddr + MMC_TX_128_TO_255_OCTETS_GB);
171 mmc->mmc_tx_256_to_511_octets_gb += 173 mmc->mmc_tx_256_to_511_octets_gb +=
172 readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB); 174 readl(mmcaddr + MMC_TX_256_TO_511_OCTETS_GB);
173 mmc->mmc_tx_512_to_1023_octets_gb += 175 mmc->mmc_tx_512_to_1023_octets_gb +=
174 readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB); 176 readl(mmcaddr + MMC_TX_512_TO_1023_OCTETS_GB);
175 mmc->mmc_tx_1024_to_max_octets_gb += 177 mmc->mmc_tx_1024_to_max_octets_gb +=
176 readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB); 178 readl(mmcaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
177 mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB); 179 mmc->mmc_tx_unicast_gb += readl(mmcaddr + MMC_TX_UNICAST_GB);
178 mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB); 180 mmc->mmc_tx_multicast_gb += readl(mmcaddr + MMC_TX_MULTICAST_GB);
179 mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB); 181 mmc->mmc_tx_broadcast_gb += readl(mmcaddr + MMC_TX_BROADCAST_GB);
180 mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR); 182 mmc->mmc_tx_underflow_error += readl(mmcaddr + MMC_TX_UNDERFLOW_ERROR);
181 mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G); 183 mmc->mmc_tx_singlecol_g += readl(mmcaddr + MMC_TX_SINGLECOL_G);
182 mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G); 184 mmc->mmc_tx_multicol_g += readl(mmcaddr + MMC_TX_MULTICOL_G);
183 mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED); 185 mmc->mmc_tx_deferred += readl(mmcaddr + MMC_TX_DEFERRED);
184 mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL); 186 mmc->mmc_tx_latecol += readl(mmcaddr + MMC_TX_LATECOL);
185 mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL); 187 mmc->mmc_tx_exesscol += readl(mmcaddr + MMC_TX_EXESSCOL);
186 mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR); 188 mmc->mmc_tx_carrier_error += readl(mmcaddr + MMC_TX_CARRIER_ERROR);
187 mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G); 189 mmc->mmc_tx_octetcount_g += readl(mmcaddr + MMC_TX_OCTETCOUNT_G);
188 mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G); 190 mmc->mmc_tx_framecount_g += readl(mmcaddr + MMC_TX_FRAMECOUNT_G);
189 mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF); 191 mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF);
190 mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME); 192 mmc->mmc_tx_pause_frame += readl(mmcaddr + MMC_TX_PAUSE_FRAME);
191 mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G); 193 mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G);
192 194
193 /* MMC RX counter registers */ 195 /* MMC RX counter registers */
194 mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB); 196 mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB);
195 mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB); 197 mmc->mmc_rx_octetcount_gb += readl(mmcaddr + MMC_RX_OCTETCOUNT_GB);
196 mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); 198 mmc->mmc_rx_octetcount_g += readl(mmcaddr + MMC_RX_OCTETCOUNT_G);
197 mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); 199 mmc->mmc_rx_broadcastframe_g += readl(mmcaddr +
198 mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); 200 MMC_RX_BROADCASTFRAME_G);
199 mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERROR); 201 mmc->mmc_rx_multicastframe_g += readl(mmcaddr +
200 mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); 202 MMC_RX_MULTICASTFRAME_G);
201 mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); 203 mmc->mmc_rx_crc_error += readl(mmcaddr + MMC_RX_CRC_ERROR);
202 mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); 204 mmc->mmc_rx_align_error += readl(mmcaddr + MMC_RX_ALIGN_ERROR);
203 mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G); 205 mmc->mmc_rx_run_error += readl(mmcaddr + MMC_RX_RUN_ERROR);
204 mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G); 206 mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_RX_JABBER_ERROR);
205 mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB); 207 mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_RX_UNDERSIZE_G);
208 mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_RX_OVERSIZE_G);
209 mmc->mmc_rx_64_octets_gb += readl(mmcaddr + MMC_RX_64_OCTETS_GB);
206 mmc->mmc_rx_65_to_127_octets_gb += 210 mmc->mmc_rx_65_to_127_octets_gb +=
207 readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB); 211 readl(mmcaddr + MMC_RX_65_TO_127_OCTETS_GB);
208 mmc->mmc_rx_128_to_255_octets_gb += 212 mmc->mmc_rx_128_to_255_octets_gb +=
209 readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB); 213 readl(mmcaddr + MMC_RX_128_TO_255_OCTETS_GB);
210 mmc->mmc_rx_256_to_511_octets_gb += 214 mmc->mmc_rx_256_to_511_octets_gb +=
211 readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB); 215 readl(mmcaddr + MMC_RX_256_TO_511_OCTETS_GB);
212 mmc->mmc_rx_512_to_1023_octets_gb += 216 mmc->mmc_rx_512_to_1023_octets_gb +=
213 readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB); 217 readl(mmcaddr + MMC_RX_512_TO_1023_OCTETS_GB);
214 mmc->mmc_rx_1024_to_max_octets_gb += 218 mmc->mmc_rx_1024_to_max_octets_gb +=
215 readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB); 219 readl(mmcaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
216 mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G); 220 mmc->mmc_rx_unicast_g += readl(mmcaddr + MMC_RX_UNICAST_G);
217 mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR); 221 mmc->mmc_rx_length_error += readl(mmcaddr + MMC_RX_LENGTH_ERROR);
218 mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE); 222 mmc->mmc_rx_autofrangetype += readl(mmcaddr + MMC_RX_AUTOFRANGETYPE);
219 mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES); 223 mmc->mmc_rx_pause_frames += readl(mmcaddr + MMC_RX_PAUSE_FRAMES);
220 mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW); 224 mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
221 mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB); 225 mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
222 mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR); 226 mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
223 /* IPC */ 227 /* IPC */
224 mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK); 228 mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK);
225 mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR); 229 mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR);
226 /* IPv4 */ 230 /* IPv4 */
227 mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD); 231 mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
228 mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR); 232 mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
229 mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY); 233 mmc->mmc_rx_ipv4_nopay += readl(mmcaddr + MMC_RX_IPV4_NOPAY);
230 mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG); 234 mmc->mmc_rx_ipv4_frag += readl(mmcaddr + MMC_RX_IPV4_FRAG);
231 mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL); 235 mmc->mmc_rx_ipv4_udsbl += readl(mmcaddr + MMC_RX_IPV4_UDSBL);
232 236
233 mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS); 237 mmc->mmc_rx_ipv4_gd_octets += readl(mmcaddr + MMC_RX_IPV4_GD_OCTETS);
234 mmc->mmc_rx_ipv4_hderr_octets += 238 mmc->mmc_rx_ipv4_hderr_octets +=
235 readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS); 239 readl(mmcaddr + MMC_RX_IPV4_HDERR_OCTETS);
236 mmc->mmc_rx_ipv4_nopay_octets += 240 mmc->mmc_rx_ipv4_nopay_octets +=
237 readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS); 241 readl(mmcaddr + MMC_RX_IPV4_NOPAY_OCTETS);
238 mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS); 242 mmc->mmc_rx_ipv4_frag_octets += readl(mmcaddr +
243 MMC_RX_IPV4_FRAG_OCTETS);
239 mmc->mmc_rx_ipv4_udsbl_octets += 244 mmc->mmc_rx_ipv4_udsbl_octets +=
240 readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS); 245 readl(mmcaddr + MMC_RX_IPV4_UDSBL_OCTETS);
241 246
242 /* IPV6 */ 247 /* IPV6 */
243 mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS); 248 mmc->mmc_rx_ipv6_gd_octets += readl(mmcaddr + MMC_RX_IPV6_GD_OCTETS);
244 mmc->mmc_rx_ipv6_hderr_octets += 249 mmc->mmc_rx_ipv6_hderr_octets +=
245 readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS); 250 readl(mmcaddr + MMC_RX_IPV6_HDERR_OCTETS);
246 mmc->mmc_rx_ipv6_nopay_octets += 251 mmc->mmc_rx_ipv6_nopay_octets +=
247 readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS); 252 readl(mmcaddr + MMC_RX_IPV6_NOPAY_OCTETS);
248 253
249 mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD); 254 mmc->mmc_rx_ipv6_gd += readl(mmcaddr + MMC_RX_IPV6_GD);
250 mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR); 255 mmc->mmc_rx_ipv6_hderr += readl(mmcaddr + MMC_RX_IPV6_HDERR);
251 mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY); 256 mmc->mmc_rx_ipv6_nopay += readl(mmcaddr + MMC_RX_IPV6_NOPAY);
252 257
253 /* Protocols */ 258 /* Protocols */
254 mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD); 259 mmc->mmc_rx_udp_gd += readl(mmcaddr + MMC_RX_UDP_GD);
255 mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR); 260 mmc->mmc_rx_udp_err += readl(mmcaddr + MMC_RX_UDP_ERR);
256 mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD); 261 mmc->mmc_rx_tcp_gd += readl(mmcaddr + MMC_RX_TCP_GD);
257 mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR); 262 mmc->mmc_rx_tcp_err += readl(mmcaddr + MMC_RX_TCP_ERR);
258 mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD); 263 mmc->mmc_rx_icmp_gd += readl(mmcaddr + MMC_RX_ICMP_GD);
259 mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR); 264 mmc->mmc_rx_icmp_err += readl(mmcaddr + MMC_RX_ICMP_ERR);
260 265
261 mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS); 266 mmc->mmc_rx_udp_gd_octets += readl(mmcaddr + MMC_RX_UDP_GD_OCTETS);
262 mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS); 267 mmc->mmc_rx_udp_err_octets += readl(mmcaddr + MMC_RX_UDP_ERR_OCTETS);
263 mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS); 268 mmc->mmc_rx_tcp_gd_octets += readl(mmcaddr + MMC_RX_TCP_GD_OCTETS);
264 mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS); 269 mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS);
265 mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS); 270 mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS);
266 mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS); 271 mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS);
267} 272}
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 011386f6f24d..2beacd0d3043 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -279,6 +279,26 @@ static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
279 return 1; 279 return 1;
280} 280}
281 281
282static void ndesc_display_ring(void *head, unsigned int size, bool rx)
283{
284 struct dma_desc *p = (struct dma_desc *)head;
285 int i;
286
287 pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
288
289 for (i = 0; i < size; i++) {
290 u64 x;
291
292 x = *(u64 *)p;
293 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
294 i, (unsigned int)virt_to_phys(p),
295 (unsigned int)x, (unsigned int)(x >> 32),
296 p->des2, p->des3);
297 p++;
298 }
299 pr_info("\n");
300}
301
282const struct stmmac_desc_ops ndesc_ops = { 302const struct stmmac_desc_ops ndesc_ops = {
283 .tx_status = ndesc_get_tx_status, 303 .tx_status = ndesc_get_tx_status,
284 .rx_status = ndesc_get_rx_status, 304 .rx_status = ndesc_get_rx_status,
@@ -297,4 +317,5 @@ const struct stmmac_desc_ops ndesc_ops = {
297 .get_tx_timestamp_status = ndesc_get_tx_timestamp_status, 317 .get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
298 .get_timestamp = ndesc_get_timestamp, 318 .get_timestamp = ndesc_get_timestamp,
299 .get_rx_timestamp_status = ndesc_get_rx_timestamp_status, 319 .get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
320 .display_ring = ndesc_display_ring,
300}; 321};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 8bbab97895fe..ff6750621ff7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,7 +24,7 @@
24#define __STMMAC_H__ 24#define __STMMAC_H__
25 25
26#define STMMAC_RESOURCE_NAME "stmmaceth" 26#define STMMAC_RESOURCE_NAME "stmmaceth"
27#define DRV_MODULE_VERSION "Oct_2015" 27#define DRV_MODULE_VERSION "Jan_2016"
28 28
29#include <linux/clk.h> 29#include <linux/clk.h>
30#include <linux/stmmac.h> 30#include <linux/stmmac.h>
@@ -67,6 +67,7 @@ struct stmmac_priv {
67 spinlock_t tx_lock; 67 spinlock_t tx_lock;
68 bool tx_path_in_lpi_mode; 68 bool tx_path_in_lpi_mode;
69 struct timer_list txtimer; 69 struct timer_list txtimer;
70 bool tso;
70 71
71 struct dma_desc *dma_rx ____cacheline_aligned_in_smp; 72 struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
72 struct dma_extended_desc *dma_erx; 73 struct dma_extended_desc *dma_erx;
@@ -128,6 +129,10 @@ struct stmmac_priv {
128 int use_riwt; 129 int use_riwt;
129 int irq_wake; 130 int irq_wake;
130 spinlock_t ptp_lock; 131 spinlock_t ptp_lock;
132 void __iomem *mmcaddr;
133 u32 rx_tail_addr;
134 u32 tx_tail_addr;
135 u32 mss;
131 136
132#ifdef CONFIG_DEBUG_FS 137#ifdef CONFIG_DEBUG_FS
133 struct dentry *dbgfs_dir; 138 struct dentry *dbgfs_dir;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 3c7928edfebb..e2b98b01647e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -161,6 +161,9 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
161 STMMAC_STAT(mtl_rx_fifo_ctrl_active), 161 STMMAC_STAT(mtl_rx_fifo_ctrl_active),
162 STMMAC_STAT(mac_rx_frame_ctrl_fifo), 162 STMMAC_STAT(mac_rx_frame_ctrl_fifo),
163 STMMAC_STAT(mac_gmii_rx_proto_engine), 163 STMMAC_STAT(mac_gmii_rx_proto_engine),
164 /* TSO */
165 STMMAC_STAT(tx_tso_frames),
166 STMMAC_STAT(tx_tso_nfrags),
164}; 167};
165#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) 168#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
166 169
@@ -499,14 +502,14 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
499 int i, j = 0; 502 int i, j = 0;
500 503
501 /* Update the DMA HW counters for dwmac10/100 */ 504 /* Update the DMA HW counters for dwmac10/100 */
502 if (!priv->plat->has_gmac) 505 if (priv->hw->dma->dma_diagnostic_fr)
503 priv->hw->dma->dma_diagnostic_fr(&dev->stats, 506 priv->hw->dma->dma_diagnostic_fr(&dev->stats,
504 (void *) &priv->xstats, 507 (void *) &priv->xstats,
505 priv->ioaddr); 508 priv->ioaddr);
506 else { 509 else {
507 /* If supported, for new GMAC chips expose the MMC counters */ 510 /* If supported, for new GMAC chips expose the MMC counters */
508 if (priv->dma_cap.rmon) { 511 if (priv->dma_cap.rmon) {
509 dwmac_mmc_read(priv->ioaddr, &priv->mmc); 512 dwmac_mmc_read(priv->mmcaddr, &priv->mmc);
510 513
511 for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { 514 for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
512 char *p; 515 char *p;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 78464fa7fe1f..3a13ddd3aac1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -56,6 +56,7 @@
56#include "dwmac1000.h" 56#include "dwmac1000.h"
57 57
58#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) 58#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
59#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
59 60
60/* Module parameters */ 61/* Module parameters */
61#define TX_TIMEO 5000 62#define TX_TIMEO 5000
@@ -725,13 +726,15 @@ static void stmmac_adjust_link(struct net_device *dev)
725 new_state = 1; 726 new_state = 1;
726 switch (phydev->speed) { 727 switch (phydev->speed) {
727 case 1000: 728 case 1000:
728 if (likely(priv->plat->has_gmac)) 729 if (likely((priv->plat->has_gmac) ||
730 (priv->plat->has_gmac4)))
729 ctrl &= ~priv->hw->link.port; 731 ctrl &= ~priv->hw->link.port;
730 stmmac_hw_fix_mac_speed(priv); 732 stmmac_hw_fix_mac_speed(priv);
731 break; 733 break;
732 case 100: 734 case 100:
733 case 10: 735 case 10:
734 if (priv->plat->has_gmac) { 736 if (likely((priv->plat->has_gmac) ||
737 (priv->plat->has_gmac4))) {
735 ctrl |= priv->hw->link.port; 738 ctrl |= priv->hw->link.port;
736 if (phydev->speed == SPEED_100) { 739 if (phydev->speed == SPEED_100) {
737 ctrl |= priv->hw->link.speed; 740 ctrl |= priv->hw->link.speed;
@@ -877,53 +880,22 @@ static int stmmac_init_phy(struct net_device *dev)
877 return 0; 880 return 0;
878} 881}
879 882
880/**
881 * stmmac_display_ring - display ring
882 * @head: pointer to the head of the ring passed.
883 * @size: size of the ring.
884 * @extend_desc: to verify if extended descriptors are used.
885 * Description: display the control/status and buffer descriptors.
886 */
887static void stmmac_display_ring(void *head, int size, int extend_desc)
888{
889 int i;
890 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
891 struct dma_desc *p = (struct dma_desc *)head;
892
893 for (i = 0; i < size; i++) {
894 u64 x;
895 if (extend_desc) {
896 x = *(u64 *) ep;
897 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
898 i, (unsigned int)virt_to_phys(ep),
899 (unsigned int)x, (unsigned int)(x >> 32),
900 ep->basic.des2, ep->basic.des3);
901 ep++;
902 } else {
903 x = *(u64 *) p;
904 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
905 i, (unsigned int)virt_to_phys(p),
906 (unsigned int)x, (unsigned int)(x >> 32),
907 p->des2, p->des3);
908 p++;
909 }
910 pr_info("\n");
911 }
912}
913
914static void stmmac_display_rings(struct stmmac_priv *priv) 883static void stmmac_display_rings(struct stmmac_priv *priv)
915{ 884{
885 void *head_rx, *head_tx;
886
916 if (priv->extend_desc) { 887 if (priv->extend_desc) {
917 pr_info("Extended RX descriptor ring:\n"); 888 head_rx = (void *)priv->dma_erx;
918 stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1); 889 head_tx = (void *)priv->dma_etx;
919 pr_info("Extended TX descriptor ring:\n");
920 stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1);
921 } else { 890 } else {
922 pr_info("RX descriptor ring:\n"); 891 head_rx = (void *)priv->dma_rx;
923 stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0); 892 head_tx = (void *)priv->dma_tx;
924 pr_info("TX descriptor ring:\n");
925 stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0);
926 } 893 }
894
895 /* Display Rx ring */
896 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
897 /* Display Tx ring */
898 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
927} 899}
928 900
929static int stmmac_set_bfsize(int mtu, int bufsize) 901static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -1002,7 +974,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1002 return -EINVAL; 974 return -EINVAL;
1003 } 975 }
1004 976
1005 p->des2 = priv->rx_skbuff_dma[i]; 977 if (priv->synopsys_id >= DWMAC_CORE_4_00)
978 p->des0 = priv->rx_skbuff_dma[i];
979 else
980 p->des2 = priv->rx_skbuff_dma[i];
1006 981
1007 if ((priv->hw->mode->init_desc3) && 982 if ((priv->hw->mode->init_desc3) &&
1008 (priv->dma_buf_sz == BUF_SIZE_16KiB)) 983 (priv->dma_buf_sz == BUF_SIZE_16KiB))
@@ -1093,7 +1068,16 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1093 p = &((priv->dma_etx + i)->basic); 1068 p = &((priv->dma_etx + i)->basic);
1094 else 1069 else
1095 p = priv->dma_tx + i; 1070 p = priv->dma_tx + i;
1096 p->des2 = 0; 1071
1072 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1073 p->des0 = 0;
1074 p->des1 = 0;
1075 p->des2 = 0;
1076 p->des3 = 0;
1077 } else {
1078 p->des2 = 0;
1079 }
1080
1097 priv->tx_skbuff_dma[i].buf = 0; 1081 priv->tx_skbuff_dma[i].buf = 0;
1098 priv->tx_skbuff_dma[i].map_as_page = false; 1082 priv->tx_skbuff_dma[i].map_as_page = false;
1099 priv->tx_skbuff_dma[i].len = 0; 1083 priv->tx_skbuff_dma[i].len = 0;
@@ -1356,9 +1340,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1356 priv->tx_skbuff_dma[entry].len, 1340 priv->tx_skbuff_dma[entry].len,
1357 DMA_TO_DEVICE); 1341 DMA_TO_DEVICE);
1358 priv->tx_skbuff_dma[entry].buf = 0; 1342 priv->tx_skbuff_dma[entry].buf = 0;
1343 priv->tx_skbuff_dma[entry].len = 0;
1359 priv->tx_skbuff_dma[entry].map_as_page = false; 1344 priv->tx_skbuff_dma[entry].map_as_page = false;
1360 } 1345 }
1361 priv->hw->mode->clean_desc3(priv, p); 1346
1347 if (priv->hw->mode->clean_desc3)
1348 priv->hw->mode->clean_desc3(priv, p);
1349
1362 priv->tx_skbuff_dma[entry].last_segment = false; 1350 priv->tx_skbuff_dma[entry].last_segment = false;
1363 priv->tx_skbuff_dma[entry].is_jumbo = false; 1351 priv->tx_skbuff_dma[entry].is_jumbo = false;
1364 1352
@@ -1481,41 +1469,23 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1481static void stmmac_mmc_setup(struct stmmac_priv *priv) 1469static void stmmac_mmc_setup(struct stmmac_priv *priv)
1482{ 1470{
1483 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 1471 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1484 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 1472 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1485 1473
1486 dwmac_mmc_intr_all_mask(priv->ioaddr); 1474 if (priv->synopsys_id >= DWMAC_CORE_4_00)
1475 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1476 else
1477 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1478
1479 dwmac_mmc_intr_all_mask(priv->mmcaddr);
1487 1480
1488 if (priv->dma_cap.rmon) { 1481 if (priv->dma_cap.rmon) {
1489 dwmac_mmc_ctrl(priv->ioaddr, mode); 1482 dwmac_mmc_ctrl(priv->mmcaddr, mode);
1490 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 1483 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1491 } else 1484 } else
1492 pr_info(" No MAC Management Counters available\n"); 1485 pr_info(" No MAC Management Counters available\n");
1493} 1486}
1494 1487
1495/** 1488/**
1496 * stmmac_get_synopsys_id - return the SYINID.
1497 * @priv: driver private structure
1498 * Description: this simple function is to decode and return the SYINID
1499 * starting from the HW core register.
1500 */
1501static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
1502{
1503 u32 hwid = priv->hw->synopsys_uid;
1504
1505 /* Check Synopsys Id (not available on old chips) */
1506 if (likely(hwid)) {
1507 u32 uid = ((hwid & 0x0000ff00) >> 8);
1508 u32 synid = (hwid & 0x000000ff);
1509
1510 pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
1511 uid, synid);
1512
1513 return synid;
1514 }
1515 return 0;
1516}
1517
1518/**
1519 * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors 1489 * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
1520 * @priv: driver private structure 1490 * @priv: driver private structure
1521 * Description: select the Enhanced/Alternate or Normal descriptors. 1491 * Description: select the Enhanced/Alternate or Normal descriptors.
@@ -1552,51 +1522,15 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1552 */ 1522 */
1553static int stmmac_get_hw_features(struct stmmac_priv *priv) 1523static int stmmac_get_hw_features(struct stmmac_priv *priv)
1554{ 1524{
1555 u32 hw_cap = 0; 1525 u32 ret = 0;
1556 1526
1557 if (priv->hw->dma->get_hw_feature) { 1527 if (priv->hw->dma->get_hw_feature) {
1558 hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr); 1528 priv->hw->dma->get_hw_feature(priv->ioaddr,
1559 1529 &priv->dma_cap);
1560 priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL); 1530 ret = 1;
1561 priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1; 1531 }
1562 priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2; 1532
1563 priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4; 1533 return ret;
1564 priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
1565 priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
1566 priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
1567 priv->dma_cap.pmt_remote_wake_up =
1568 (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
1569 priv->dma_cap.pmt_magic_frame =
1570 (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
1571 /* MMC */
1572 priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
1573 /* IEEE 1588-2002 */
1574 priv->dma_cap.time_stamp =
1575 (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
1576 /* IEEE 1588-2008 */
1577 priv->dma_cap.atime_stamp =
1578 (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
1579 /* 802.3az - Energy-Efficient Ethernet (EEE) */
1580 priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
1581 priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
1582 /* TX and RX csum */
1583 priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
1584 priv->dma_cap.rx_coe_type1 =
1585 (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
1586 priv->dma_cap.rx_coe_type2 =
1587 (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
1588 priv->dma_cap.rxfifo_over_2048 =
1589 (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
1590 /* TX and RX number of channels */
1591 priv->dma_cap.number_rx_channel =
1592 (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
1593 priv->dma_cap.number_tx_channel =
1594 (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
1595 /* Alternate (enhanced) DESC mode */
1596 priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
1597 }
1598
1599 return hw_cap;
1600} 1534}
1601 1535
1602/** 1536/**
@@ -1652,8 +1586,19 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1652 priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, 1586 priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
1653 aal, priv->dma_tx_phy, priv->dma_rx_phy, atds); 1587 aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
1654 1588
1655 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && 1589 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1656 (priv->plat->axi && priv->hw->dma->axi)) 1590 priv->rx_tail_addr = priv->dma_rx_phy +
1591 (DMA_RX_SIZE * sizeof(struct dma_desc));
1592 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
1593 STMMAC_CHAN0);
1594
1595 priv->tx_tail_addr = priv->dma_tx_phy +
1596 (DMA_TX_SIZE * sizeof(struct dma_desc));
1597 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
1598 STMMAC_CHAN0);
1599 }
1600
1601 if (priv->plat->axi && priv->hw->dma->axi)
1657 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi); 1602 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
1658 1603
1659 return ret; 1604 return ret;
@@ -1733,7 +1678,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1733 } 1678 }
1734 1679
1735 /* Enable the MAC Rx/Tx */ 1680 /* Enable the MAC Rx/Tx */
1736 stmmac_set_mac(priv->ioaddr, true); 1681 if (priv->synopsys_id >= DWMAC_CORE_4_00)
1682 stmmac_dwmac4_set_mac(priv->ioaddr, true);
1683 else
1684 stmmac_set_mac(priv->ioaddr, true);
1737 1685
1738 /* Set the HW DMA mode and the COE */ 1686 /* Set the HW DMA mode and the COE */
1739 stmmac_dma_operation_mode(priv); 1687 stmmac_dma_operation_mode(priv);
@@ -1771,6 +1719,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1771 if (priv->pcs && priv->hw->mac->ctrl_ane) 1719 if (priv->pcs && priv->hw->mac->ctrl_ane)
1772 priv->hw->mac->ctrl_ane(priv->hw, 0); 1720 priv->hw->mac->ctrl_ane(priv->hw, 0);
1773 1721
1722 /* set TX ring length */
1723 if (priv->hw->dma->set_tx_ring_len)
1724 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
1725 (DMA_TX_SIZE - 1));
1726 /* set RX ring length */
1727 if (priv->hw->dma->set_rx_ring_len)
1728 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
1729 (DMA_RX_SIZE - 1));
1730 /* Enable TSO */
1731 if (priv->tso)
1732 priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
1733
1774 return 0; 1734 return 0;
1775} 1735}
1776 1736
@@ -1936,6 +1896,239 @@ static int stmmac_release(struct net_device *dev)
1936} 1896}
1937 1897
1938/** 1898/**
1899 * stmmac_tso_allocator - close entry point of the driver
1900 * @priv: driver private structure
1901 * @des: buffer start address
1902 * @total_len: total length to fill in descriptors
1903 * @last_segmant: condition for the last descriptor
1904 * Description:
1905 * This function fills descriptor and request new descriptors according to
1906 * buffer length to fill
1907 */
1908static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
1909 int total_len, bool last_segment)
1910{
1911 struct dma_desc *desc;
1912 int tmp_len;
1913 u32 buff_size;
1914
1915 tmp_len = total_len;
1916
1917 while (tmp_len > 0) {
1918 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
1919 desc = priv->dma_tx + priv->cur_tx;
1920
1921 desc->des0 = des + (total_len - tmp_len);
1922 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
1923 TSO_MAX_BUFF_SIZE : tmp_len;
1924
1925 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
1926 0, 1,
1927 (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
1928 0, 0);
1929
1930 tmp_len -= TSO_MAX_BUFF_SIZE;
1931 }
1932}
1933
1934/**
1935 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
1936 * @skb : the socket buffer
1937 * @dev : device pointer
1938 * Description: this is the transmit function that is called on TSO frames
1939 * (support available on GMAC4 and newer chips).
1940 * Diagram below show the ring programming in case of TSO frames:
1941 *
1942 * First Descriptor
1943 * --------
1944 * | DES0 |---> buffer1 = L2/L3/L4 header
1945 * | DES1 |---> TCP Payload (can continue on next descr...)
1946 * | DES2 |---> buffer 1 and 2 len
1947 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
1948 * --------
1949 * |
1950 * ...
1951 * |
1952 * --------
1953 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
1954 * | DES1 | --|
1955 * | DES2 | --> buffer 1 and 2 len
1956 * | DES3 |
1957 * --------
1958 *
1959 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
1960 */
1961static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
1962{
1963 u32 pay_len, mss;
1964 int tmp_pay_len = 0;
1965 struct stmmac_priv *priv = netdev_priv(dev);
1966 int nfrags = skb_shinfo(skb)->nr_frags;
1967 unsigned int first_entry, des;
1968 struct dma_desc *desc, *first, *mss_desc = NULL;
1969 u8 proto_hdr_len;
1970 int i;
1971
1972 spin_lock(&priv->tx_lock);
1973
1974 /* Compute header lengths */
1975 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1976
1977 /* Desc availability based on threshold should be enough safe */
1978 if (unlikely(stmmac_tx_avail(priv) <
1979 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
1980 if (!netif_queue_stopped(dev)) {
1981 netif_stop_queue(dev);
1982 /* This is a hard error, log it. */
1983 pr_err("%s: Tx Ring full when queue awake\n", __func__);
1984 }
1985 spin_unlock(&priv->tx_lock);
1986 return NETDEV_TX_BUSY;
1987 }
1988
1989 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
1990
1991 mss = skb_shinfo(skb)->gso_size;
1992
1993 /* set new MSS value if needed */
1994 if (mss != priv->mss) {
1995 mss_desc = priv->dma_tx + priv->cur_tx;
1996 priv->hw->desc->set_mss(mss_desc, mss);
1997 priv->mss = mss;
1998 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
1999 }
2000
2001 if (netif_msg_tx_queued(priv)) {
2002 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2003 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2004 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2005 skb->data_len);
2006 }
2007
2008 first_entry = priv->cur_tx;
2009
2010 desc = priv->dma_tx + first_entry;
2011 first = desc;
2012
2013 /* first descriptor: fill Headers on Buf1 */
2014 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2015 DMA_TO_DEVICE);
2016 if (dma_mapping_error(priv->device, des))
2017 goto dma_map_err;
2018
2019 priv->tx_skbuff_dma[first_entry].buf = des;
2020 priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2021 priv->tx_skbuff[first_entry] = skb;
2022
2023 first->des0 = des;
2024
2025 /* Fill start of payload in buff2 of first descriptor */
2026 if (pay_len)
2027 first->des1 = des + proto_hdr_len;
2028
2029 /* If needed take extra descriptors to fill the remaining payload */
2030 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2031
2032 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
2033
2034 /* Prepare fragments */
2035 for (i = 0; i < nfrags; i++) {
2036 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2037
2038 des = skb_frag_dma_map(priv->device, frag, 0,
2039 skb_frag_size(frag),
2040 DMA_TO_DEVICE);
2041
2042 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2043 (i == nfrags - 1));
2044
2045 priv->tx_skbuff_dma[priv->cur_tx].buf = des;
2046 priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
2047 priv->tx_skbuff[priv->cur_tx] = NULL;
2048 priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
2049 }
2050
2051 priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
2052
2053 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2054
2055 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2056 if (netif_msg_hw(priv))
2057 pr_debug("%s: stop transmitted packets\n", __func__);
2058 netif_stop_queue(dev);
2059 }
2060
2061 dev->stats.tx_bytes += skb->len;
2062 priv->xstats.tx_tso_frames++;
2063 priv->xstats.tx_tso_nfrags += nfrags;
2064
2065 /* Manage tx mitigation */
2066 priv->tx_count_frames += nfrags + 1;
2067 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2068 mod_timer(&priv->txtimer,
2069 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2070 } else {
2071 priv->tx_count_frames = 0;
2072 priv->hw->desc->set_tx_ic(desc);
2073 priv->xstats.tx_set_ic_bit++;
2074 }
2075
2076 if (!priv->hwts_tx_en)
2077 skb_tx_timestamp(skb);
2078
2079 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2080 priv->hwts_tx_en)) {
2081 /* declare that device is doing timestamping */
2082 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2083 priv->hw->desc->enable_tx_timestamp(first);
2084 }
2085
2086 /* Complete the first descriptor before granting the DMA */
2087 priv->hw->desc->prepare_tso_tx_desc(first, 1,
2088 proto_hdr_len,
2089 pay_len,
2090 1, priv->tx_skbuff_dma[first_entry].last_segment,
2091 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2092
2093 /* If context desc is used to change MSS */
2094 if (mss_desc)
2095 priv->hw->desc->set_tx_owner(mss_desc);
2096
2097 /* The own bit must be the latest setting done when prepare the
2098 * descriptor and then barrier is needed to make sure that
2099 * all is coherent before granting the DMA engine.
2100 */
2101 smp_wmb();
2102
2103 if (netif_msg_pktdata(priv)) {
2104 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2105 __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2106 priv->cur_tx, first, nfrags);
2107
2108 priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
2109 0);
2110
2111 pr_info(">>> frame to be transmitted: ");
2112 print_pkt(skb->data, skb_headlen(skb));
2113 }
2114
2115 netdev_sent_queue(dev, skb->len);
2116
2117 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2118 STMMAC_CHAN0);
2119
2120 spin_unlock(&priv->tx_lock);
2121 return NETDEV_TX_OK;
2122
2123dma_map_err:
2124 spin_unlock(&priv->tx_lock);
2125 dev_err(priv->device, "Tx dma map failed\n");
2126 dev_kfree_skb(skb);
2127 priv->dev->stats.tx_dropped++;
2128 return NETDEV_TX_OK;
2129}
2130
2131/**
1939 * stmmac_xmit - Tx entry point of the driver 2132 * stmmac_xmit - Tx entry point of the driver
1940 * @skb : the socket buffer 2133 * @skb : the socket buffer
1941 * @dev : device pointer 2134 * @dev : device pointer
@@ -1952,6 +2145,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1952 unsigned int entry, first_entry; 2145 unsigned int entry, first_entry;
1953 struct dma_desc *desc, *first; 2146 struct dma_desc *desc, *first;
1954 unsigned int enh_desc; 2147 unsigned int enh_desc;
2148 unsigned int des;
2149
2150 /* Manage oversized TCP frames for GMAC4 device */
2151 if (skb_is_gso(skb) && priv->tso) {
2152 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2153 return stmmac_tso_xmit(skb, dev);
2154 }
1955 2155
1956 spin_lock(&priv->tx_lock); 2156 spin_lock(&priv->tx_lock);
1957 2157
@@ -1987,7 +2187,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1987 if (enh_desc) 2187 if (enh_desc)
1988 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); 2188 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
1989 2189
1990 if (unlikely(is_jumbo)) { 2190 if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2191 DWMAC_CORE_4_00)) {
1991 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); 2192 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
1992 if (unlikely(entry < 0)) 2193 if (unlikely(entry < 0))
1993 goto dma_map_err; 2194 goto dma_map_err;
@@ -2005,13 +2206,21 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2005 else 2206 else
2006 desc = priv->dma_tx + entry; 2207 desc = priv->dma_tx + entry;
2007 2208
2008 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, 2209 des = skb_frag_dma_map(priv->device, frag, 0, len,
2009 DMA_TO_DEVICE); 2210 DMA_TO_DEVICE);
2010 if (dma_mapping_error(priv->device, desc->des2)) 2211 if (dma_mapping_error(priv->device, des))
2011 goto dma_map_err; /* should reuse desc w/o issues */ 2212 goto dma_map_err; /* should reuse desc w/o issues */
2012 2213
2013 priv->tx_skbuff[entry] = NULL; 2214 priv->tx_skbuff[entry] = NULL;
2014 priv->tx_skbuff_dma[entry].buf = desc->des2; 2215
2216 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2217 desc->des0 = des;
2218 priv->tx_skbuff_dma[entry].buf = desc->des0;
2219 } else {
2220 desc->des2 = des;
2221 priv->tx_skbuff_dma[entry].buf = desc->des2;
2222 }
2223
2015 priv->tx_skbuff_dma[entry].map_as_page = true; 2224 priv->tx_skbuff_dma[entry].map_as_page = true;
2016 priv->tx_skbuff_dma[entry].len = len; 2225 priv->tx_skbuff_dma[entry].len = len;
2017 priv->tx_skbuff_dma[entry].last_segment = last_segment; 2226 priv->tx_skbuff_dma[entry].last_segment = last_segment;
@@ -2026,16 +2235,18 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2026 priv->cur_tx = entry; 2235 priv->cur_tx = entry;
2027 2236
2028 if (netif_msg_pktdata(priv)) { 2237 if (netif_msg_pktdata(priv)) {
2238 void *tx_head;
2239
2029 pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 2240 pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2030 __func__, priv->cur_tx, priv->dirty_tx, first_entry, 2241 __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2031 entry, first, nfrags); 2242 entry, first, nfrags);
2032 2243
2033 if (priv->extend_desc) 2244 if (priv->extend_desc)
2034 stmmac_display_ring((void *)priv->dma_etx, 2245 tx_head = (void *)priv->dma_etx;
2035 DMA_TX_SIZE, 1);
2036 else 2246 else
2037 stmmac_display_ring((void *)priv->dma_tx, 2247 tx_head = (void *)priv->dma_tx;
2038 DMA_TX_SIZE, 0); 2248
2249 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
2039 2250
2040 pr_debug(">>> frame to be transmitted: "); 2251 pr_debug(">>> frame to be transmitted: ");
2041 print_pkt(skb->data, skb->len); 2252 print_pkt(skb->data, skb->len);
@@ -2074,12 +2285,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2074 if (likely(!is_jumbo)) { 2285 if (likely(!is_jumbo)) {
2075 bool last_segment = (nfrags == 0); 2286 bool last_segment = (nfrags == 0);
2076 2287
2077 first->des2 = dma_map_single(priv->device, skb->data, 2288 des = dma_map_single(priv->device, skb->data,
2078 nopaged_len, DMA_TO_DEVICE); 2289 nopaged_len, DMA_TO_DEVICE);
2079 if (dma_mapping_error(priv->device, first->des2)) 2290 if (dma_mapping_error(priv->device, des))
2080 goto dma_map_err; 2291 goto dma_map_err;
2081 2292
2082 priv->tx_skbuff_dma[first_entry].buf = first->des2; 2293 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2294 first->des0 = des;
2295 priv->tx_skbuff_dma[first_entry].buf = first->des0;
2296 } else {
2297 first->des2 = des;
2298 priv->tx_skbuff_dma[first_entry].buf = first->des2;
2299 }
2300
2083 priv->tx_skbuff_dma[first_entry].len = nopaged_len; 2301 priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2084 priv->tx_skbuff_dma[first_entry].last_segment = last_segment; 2302 priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2085 2303
@@ -2103,7 +2321,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2103 } 2321 }
2104 2322
2105 netdev_sent_queue(dev, skb->len); 2323 netdev_sent_queue(dev, skb->len);
2106 priv->hw->dma->enable_dma_transmission(priv->ioaddr); 2324
2325 if (priv->synopsys_id < DWMAC_CORE_4_00)
2326 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2327 else
2328 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2329 STMMAC_CHAN0);
2107 2330
2108 spin_unlock(&priv->tx_lock); 2331 spin_unlock(&priv->tx_lock);
2109 return NETDEV_TX_OK; 2332 return NETDEV_TX_OK;
@@ -2185,9 +2408,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2185 dev_kfree_skb(skb); 2408 dev_kfree_skb(skb);
2186 break; 2409 break;
2187 } 2410 }
2188 p->des2 = priv->rx_skbuff_dma[entry];
2189 2411
2190 priv->hw->mode->refill_desc3(priv, p); 2412 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2413 p->des0 = priv->rx_skbuff_dma[entry];
2414 p->des1 = 0;
2415 } else {
2416 p->des2 = priv->rx_skbuff_dma[entry];
2417 }
2418 if (priv->hw->mode->refill_desc3)
2419 priv->hw->mode->refill_desc3(priv, p);
2191 2420
2192 if (priv->rx_zeroc_thresh > 0) 2421 if (priv->rx_zeroc_thresh > 0)
2193 priv->rx_zeroc_thresh--; 2422 priv->rx_zeroc_thresh--;
@@ -2195,9 +2424,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2195 if (netif_msg_rx_status(priv)) 2424 if (netif_msg_rx_status(priv))
2196 pr_debug("\trefill entry #%d\n", entry); 2425 pr_debug("\trefill entry #%d\n", entry);
2197 } 2426 }
2198
2199 wmb(); 2427 wmb();
2200 priv->hw->desc->set_rx_owner(p); 2428
2429 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2430 priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
2431 else
2432 priv->hw->desc->set_rx_owner(p);
2433
2201 wmb(); 2434 wmb();
2202 2435
2203 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); 2436 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
@@ -2220,13 +2453,15 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2220 int coe = priv->hw->rx_csum; 2453 int coe = priv->hw->rx_csum;
2221 2454
2222 if (netif_msg_rx_status(priv)) { 2455 if (netif_msg_rx_status(priv)) {
2456 void *rx_head;
2457
2223 pr_debug("%s: descriptor ring:\n", __func__); 2458 pr_debug("%s: descriptor ring:\n", __func__);
2224 if (priv->extend_desc) 2459 if (priv->extend_desc)
2225 stmmac_display_ring((void *)priv->dma_erx, 2460 rx_head = (void *)priv->dma_erx;
2226 DMA_RX_SIZE, 1);
2227 else 2461 else
2228 stmmac_display_ring((void *)priv->dma_rx, 2462 rx_head = (void *)priv->dma_rx;
2229 DMA_RX_SIZE, 0); 2463
2464 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
2230 } 2465 }
2231 while (count < limit) { 2466 while (count < limit) {
2232 int status; 2467 int status;
@@ -2276,11 +2511,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2276 } else { 2511 } else {
2277 struct sk_buff *skb; 2512 struct sk_buff *skb;
2278 int frame_len; 2513 int frame_len;
2514 unsigned int des;
2515
2516 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2517 des = p->des0;
2518 else
2519 des = p->des2;
2279 2520
2280 frame_len = priv->hw->desc->get_rx_frame_len(p, coe); 2521 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2281 2522
2282 /* check if frame_len fits the preallocated memory */ 2523 /* If frame length is greather than skb buffer size
2524 * (preallocated during init) then the packet is
2525 * ignored
2526 */
2283 if (frame_len > priv->dma_buf_sz) { 2527 if (frame_len > priv->dma_buf_sz) {
2528 pr_err("%s: len %d larger than size (%d)\n",
2529 priv->dev->name, frame_len,
2530 priv->dma_buf_sz);
2284 priv->dev->stats.rx_length_errors++; 2531 priv->dev->stats.rx_length_errors++;
2285 break; 2532 break;
2286 } 2533 }
@@ -2293,14 +2540,19 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2293 2540
2294 if (netif_msg_rx_status(priv)) { 2541 if (netif_msg_rx_status(priv)) {
2295 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", 2542 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
2296 p, entry, p->des2); 2543 p, entry, des);
2297 if (frame_len > ETH_FRAME_LEN) 2544 if (frame_len > ETH_FRAME_LEN)
2298 pr_debug("\tframe size %d, COE: %d\n", 2545 pr_debug("\tframe size %d, COE: %d\n",
2299 frame_len, status); 2546 frame_len, status);
2300 } 2547 }
2301 2548
2302 if (unlikely((frame_len < priv->rx_copybreak) || 2549 /* The zero-copy is always used for all the sizes
2303 stmmac_rx_threshold_count(priv))) { 2550 * in case of GMAC4 because it needs
2551 * to refill the used descriptors, always.
2552 */
2553 if (unlikely(!priv->plat->has_gmac4 &&
2554 ((frame_len < priv->rx_copybreak) ||
2555 stmmac_rx_threshold_count(priv)))) {
2304 skb = netdev_alloc_skb_ip_align(priv->dev, 2556 skb = netdev_alloc_skb_ip_align(priv->dev,
2305 frame_len); 2557 frame_len);
2306 if (unlikely(!skb)) { 2558 if (unlikely(!skb)) {
@@ -2452,7 +2704,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2452 return -EBUSY; 2704 return -EBUSY;
2453 } 2705 }
2454 2706
2455 if (priv->plat->enh_desc) 2707 if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
2456 max_mtu = JUMBO_LEN; 2708 max_mtu = JUMBO_LEN;
2457 else 2709 else
2458 max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 2710 max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
@@ -2466,6 +2718,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2466 } 2718 }
2467 2719
2468 dev->mtu = new_mtu; 2720 dev->mtu = new_mtu;
2721
2469 netdev_update_features(dev); 2722 netdev_update_features(dev);
2470 2723
2471 return 0; 2724 return 0;
@@ -2490,6 +2743,14 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
2490 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 2743 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2491 features &= ~NETIF_F_CSUM_MASK; 2744 features &= ~NETIF_F_CSUM_MASK;
2492 2745
2746 /* Disable tso if asked by ethtool */
2747 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
2748 if (features & NETIF_F_TSO)
2749 priv->tso = true;
2750 else
2751 priv->tso = false;
2752 }
2753
2493 return features; 2754 return features;
2494} 2755}
2495 2756
@@ -2536,7 +2797,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2536 } 2797 }
2537 2798
2538 /* To handle GMAC own interrupts */ 2799 /* To handle GMAC own interrupts */
2539 if (priv->plat->has_gmac) { 2800 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
2540 int status = priv->hw->mac->host_irq_status(priv->hw, 2801 int status = priv->hw->mac->host_irq_status(priv->hw,
2541 &priv->xstats); 2802 &priv->xstats);
2542 if (unlikely(status)) { 2803 if (unlikely(status)) {
@@ -2545,6 +2806,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2545 priv->tx_path_in_lpi_mode = true; 2806 priv->tx_path_in_lpi_mode = true;
2546 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 2807 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2547 priv->tx_path_in_lpi_mode = false; 2808 priv->tx_path_in_lpi_mode = false;
2809 if (status & CORE_IRQ_MTL_RX_OVERFLOW)
2810 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2811 priv->rx_tail_addr,
2812 STMMAC_CHAN0);
2548 } 2813 }
2549 } 2814 }
2550 2815
@@ -2617,15 +2882,14 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
2617 x = *(u64 *) ep; 2882 x = *(u64 *) ep;
2618 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 2883 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2619 i, (unsigned int)virt_to_phys(ep), 2884 i, (unsigned int)virt_to_phys(ep),
2620 (unsigned int)x, (unsigned int)(x >> 32), 2885 ep->basic.des0, ep->basic.des1,
2621 ep->basic.des2, ep->basic.des3); 2886 ep->basic.des2, ep->basic.des3);
2622 ep++; 2887 ep++;
2623 } else { 2888 } else {
2624 x = *(u64 *) p; 2889 x = *(u64 *) p;
2625 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 2890 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2626 i, (unsigned int)virt_to_phys(ep), 2891 i, (unsigned int)virt_to_phys(ep),
2627 (unsigned int)x, (unsigned int)(x >> 32), 2892 p->des0, p->des1, p->des2, p->des3);
2628 p->des2, p->des3);
2629 p++; 2893 p++;
2630 } 2894 }
2631 seq_printf(seq, "\n"); 2895 seq_printf(seq, "\n");
@@ -2708,10 +2972,15 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
2708 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 2972 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
2709 seq_printf(seq, "\tChecksum Offload in TX: %s\n", 2973 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
2710 (priv->dma_cap.tx_coe) ? "Y" : "N"); 2974 (priv->dma_cap.tx_coe) ? "Y" : "N");
2711 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 2975 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2712 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 2976 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
2713 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 2977 (priv->dma_cap.rx_coe) ? "Y" : "N");
2714 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 2978 } else {
2979 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
2980 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
2981 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
2982 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
2983 }
2715 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 2984 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
2716 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); 2985 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
2717 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 2986 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
@@ -2820,27 +3089,35 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2820 priv->dev->priv_flags |= IFF_UNICAST_FLT; 3089 priv->dev->priv_flags |= IFF_UNICAST_FLT;
2821 mac = dwmac1000_setup(priv->ioaddr, 3090 mac = dwmac1000_setup(priv->ioaddr,
2822 priv->plat->multicast_filter_bins, 3091 priv->plat->multicast_filter_bins,
2823 priv->plat->unicast_filter_entries); 3092 priv->plat->unicast_filter_entries,
3093 &priv->synopsys_id);
3094 } else if (priv->plat->has_gmac4) {
3095 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3096 mac = dwmac4_setup(priv->ioaddr,
3097 priv->plat->multicast_filter_bins,
3098 priv->plat->unicast_filter_entries,
3099 &priv->synopsys_id);
2824 } else { 3100 } else {
2825 mac = dwmac100_setup(priv->ioaddr); 3101 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
2826 } 3102 }
2827 if (!mac) 3103 if (!mac)
2828 return -ENOMEM; 3104 return -ENOMEM;
2829 3105
2830 priv->hw = mac; 3106 priv->hw = mac;
2831 3107
2832 /* Get and dump the chip ID */
2833 priv->synopsys_id = stmmac_get_synopsys_id(priv);
2834
2835 /* To use the chained or ring mode */ 3108 /* To use the chained or ring mode */
2836 if (chain_mode) { 3109 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2837 priv->hw->mode = &chain_mode_ops; 3110 priv->hw->mode = &dwmac4_ring_mode_ops;
2838 pr_info(" Chain mode enabled\n");
2839 priv->mode = STMMAC_CHAIN_MODE;
2840 } else { 3111 } else {
2841 priv->hw->mode = &ring_mode_ops; 3112 if (chain_mode) {
2842 pr_info(" Ring mode enabled\n"); 3113 priv->hw->mode = &chain_mode_ops;
2843 priv->mode = STMMAC_RING_MODE; 3114 pr_info(" Chain mode enabled\n");
3115 priv->mode = STMMAC_CHAIN_MODE;
3116 } else {
3117 priv->hw->mode = &ring_mode_ops;
3118 pr_info(" Ring mode enabled\n");
3119 priv->mode = STMMAC_RING_MODE;
3120 }
2844 } 3121 }
2845 3122
2846 /* Get the HW capability (new GMAC newer than 3.50a) */ 3123 /* Get the HW capability (new GMAC newer than 3.50a) */
@@ -2856,11 +3133,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2856 priv->plat->enh_desc = priv->dma_cap.enh_desc; 3133 priv->plat->enh_desc = priv->dma_cap.enh_desc;
2857 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; 3134 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
2858 3135
2859 /* TXCOE doesn't work in thresh DMA mode */ 3136 priv->plat->tx_coe = priv->dma_cap.tx_coe;
2860 if (priv->plat->force_thresh_dma_mode) 3137 /* In case of GMAC4 rx_coe is from HW cap register. */
2861 priv->plat->tx_coe = 0; 3138 priv->plat->rx_coe = priv->dma_cap.rx_coe;
2862 else
2863 priv->plat->tx_coe = priv->dma_cap.tx_coe;
2864 3139
2865 if (priv->dma_cap.rx_coe_type2) 3140 if (priv->dma_cap.rx_coe_type2)
2866 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; 3141 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
@@ -2870,13 +3145,17 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2870 } else 3145 } else
2871 pr_info(" No HW DMA feature register supported"); 3146 pr_info(" No HW DMA feature register supported");
2872 3147
2873 /* To use alternate (extended) or normal descriptor structures */ 3148 /* To use alternate (extended), normal or GMAC4 descriptor structures */
2874 stmmac_selec_desc_mode(priv); 3149 if (priv->synopsys_id >= DWMAC_CORE_4_00)
3150 priv->hw->desc = &dwmac4_desc_ops;
3151 else
3152 stmmac_selec_desc_mode(priv);
2875 3153
2876 if (priv->plat->rx_coe) { 3154 if (priv->plat->rx_coe) {
2877 priv->hw->rx_csum = priv->plat->rx_coe; 3155 priv->hw->rx_csum = priv->plat->rx_coe;
2878 pr_info(" RX Checksum Offload Engine supported (type %d)\n", 3156 pr_info(" RX Checksum Offload Engine supported\n");
2879 priv->plat->rx_coe); 3157 if (priv->synopsys_id < DWMAC_CORE_4_00)
3158 pr_info("\tCOE Type %d\n", priv->hw->rx_csum);
2880 } 3159 }
2881 if (priv->plat->tx_coe) 3160 if (priv->plat->tx_coe)
2882 pr_info(" TX Checksum insertion supported\n"); 3161 pr_info(" TX Checksum insertion supported\n");
@@ -2886,6 +3165,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2886 device_set_wakeup_capable(priv->device, 1); 3165 device_set_wakeup_capable(priv->device, 1);
2887 } 3166 }
2888 3167
3168 if (priv->dma_cap.tsoen)
3169 pr_info(" TSO supported\n");
3170
2889 return 0; 3171 return 0;
2890} 3172}
2891 3173
@@ -2989,6 +3271,12 @@ int stmmac_dvr_probe(struct device *device,
2989 3271
2990 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 3272 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2991 NETIF_F_RXCSUM; 3273 NETIF_F_RXCSUM;
3274
3275 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3276 ndev->hw_features |= NETIF_F_TSO;
3277 priv->tso = true;
3278 pr_info(" TSO feature enabled\n");
3279 }
2992 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 3280 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
2993 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 3281 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
2994#ifdef STMMAC_VLAN_TAG_USED 3282#ifdef STMMAC_VLAN_TAG_USED
@@ -3183,6 +3471,11 @@ int stmmac_resume(struct net_device *ndev)
3183 priv->dirty_rx = 0; 3471 priv->dirty_rx = 0;
3184 priv->dirty_tx = 0; 3472 priv->dirty_tx = 0;
3185 priv->cur_tx = 0; 3473 priv->cur_tx = 0;
3474 /* reset private mss value to force mss context settings at
3475 * next tso xmit (only used for gmac4).
3476 */
3477 priv->mss = 0;
3478
3186 stmmac_clear_descriptors(priv); 3479 stmmac_clear_descriptors(priv);
3187 3480
3188 stmmac_hw_setup(ndev, false); 3481 stmmac_hw_setup(ndev, false);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index cf37ea558ecc..effaa4ff5ab7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -284,6 +284,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
284 plat->pmt = 1; 284 plat->pmt = 1;
285 } 285 }
286 286
287 if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
288 of_device_is_compatible(np, "snps,dwmac-4.10a")) {
289 plat->has_gmac4 = 1;
290 plat->pmt = 1;
291 plat->tso_en = of_property_read_bool(np, "snps,tso");
292 }
293
287 if (of_device_is_compatible(np, "snps,dwmac-3.610") || 294 if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
288 of_device_is_compatible(np, "snps,dwmac-3.710")) { 295 of_device_is_compatible(np, "snps,dwmac-3.710")) {
289 plat->enh_desc = 1; 296 plat->enh_desc = 1;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index e6bc30a42a74..ffdaca9c01af 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -137,5 +137,7 @@ struct plat_stmmacenet_data {
137 void (*exit)(struct platform_device *pdev, void *priv); 137 void (*exit)(struct platform_device *pdev, void *priv);
138 void *bsp_priv; 138 void *bsp_priv;
139 struct stmmac_axi *axi; 139 struct stmmac_axi *axi;
140 int has_gmac4;
141 bool tso_en;
140}; 142};
141#endif 143#endif