author		David S. Miller <davem@davemloft.net>	2019-06-06 19:24:30 -0400
committer	David S. Miller <davem@davemloft.net>	2019-06-06 19:24:30 -0400
commit		96524ea4be04683bb3ad8ebaedcbe3a6e34302de (patch)
tree		1aa7e943c5d445b4eb21299ea3e3871a8027b59c
parent		40ae25505fe834648ce4aa70b073ee934942bfdb (diff)
parent		f5203a3d9b25fa28a40bfc27c05a4020c6430f06 (diff)
Merge branch 'Xilinx-axienet-driver-updates'
Robert Hancock says:

====================
Xilinx axienet driver updates (v5)

This is a series of enhancements and bug fixes in order to get the
mainline version of this driver into a more generally usable state,
including on x86 or ARM platforms. It also converts the driver to use
the phylink API in order to provide support for SFP modules.

Changes since v4:
-Use reverse christmas tree variable order
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
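For readers skimming the merge: the heart of the phylink conversion is that the driver now publishes a set of phylink_mac_ops callbacks and creates a phylink instance at probe time, instead of registering an adjust_link callback through of_phy_connect(). A condensed sketch of that wiring, excerpted and trimmed from the xilinx_axienet_main.c hunks below (error handling and callback bodies elided, so not a complete buildable unit):

	/* The callbacks translate phylink state into the core's EMMC (speed)
	 * and FCC (flow control) registers; full versions are in the diff.
	 */
	static const struct phylink_mac_ops axienet_phylink_ops = {
		.validate	= axienet_validate,
		.mac_link_state	= axienet_mac_link_state,
		.mac_an_restart	= axienet_mac_an_restart,
		.mac_config	= axienet_mac_config,
		.mac_link_down	= axienet_mac_link_down,
		.mac_link_up	= axienet_mac_link_up,
	};

	/* In axienet_probe(): */
	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;
	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode, &axienet_phylink_ops);

	/* In axienet_open(): */
	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	phylink_start(lp->phylink);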
-rw-r--r--	Documentation/devicetree/bindings/net/xilinx_axienet.txt	|  29
-rw-r--r--	drivers/net/ethernet/xilinx/Kconfig				|   6
-rw-r--r--	drivers/net/ethernet/xilinx/xilinx_axienet.h			|  35
-rw-r--r--	drivers/net/ethernet/xilinx/xilinx_axienet_main.c		| 677
-rw-r--r--	drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c		| 111
5 files changed, 593 insertions(+), 265 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/xilinx_axienet.txt b/Documentation/devicetree/bindings/net/xilinx_axienet.txt
index 38f9ec076743..7360617cdedb 100644
--- a/Documentation/devicetree/bindings/net/xilinx_axienet.txt
+++ b/Documentation/devicetree/bindings/net/xilinx_axienet.txt
@@ -17,8 +17,15 @@ For more details about mdio please refer phy.txt file in the same directory.
 Required properties:
 - compatible	: Must be one of "xlnx,axi-ethernet-1.00.a",
 		  "xlnx,axi-ethernet-1.01.a", "xlnx,axi-ethernet-2.01.a"
-- reg		: Address and length of the IO space.
-- interrupts	: Should be a list of two interrupt, TX and RX.
+- reg		: Address and length of the IO space, as well as the address
+		  and length of the AXI DMA controller IO space, unless
+		  axistream-connected is specified, in which case the reg
+		  attribute of the node referenced by it is used.
+- interrupts	: Should be a list of 2 or 3 interrupts: TX DMA, RX DMA,
+		  and optionally Ethernet core. If axistream-connected is
+		  specified, the TX/RX DMA interrupts should be on that node
+		  instead, and only the Ethernet core interrupt is optionally
+		  specified here.
 - phy-handle	: Should point to the external phy device.
 		  See ethernet.txt file in the same directory.
 - xlnx,rxmem	: Set to allocated memory buffer for Rx/Tx in the hardware
@@ -31,15 +38,29 @@ Optional properties:
 		    1 to enable partial TX checksum offload,
 		    2 to enable full TX checksum offload
 - xlnx,rxcsum	: Same values as xlnx,txcsum but for RX checksum offload
+- clocks	: AXI bus clock for the device. Refer to common clock bindings.
+		  Used to calculate MDIO clock divisor. If not specified, it is
+		  auto-detected from the CPU clock (but only on platforms where
+		  this is possible). New device trees should specify this - the
+		  auto detection is only for backward compatibility.
+- axistream-connected: Reference to another node which contains the resources
+			for the AXI DMA controller used by this device.
+			If this is specified, the DMA-related resources from that
+			device (DMA registers and DMA TX/RX interrupts) rather
+			than this one will be used.
+ - mdio		: Child node for MDIO bus. Must be defined if PHY access is
+		  required through the core's MDIO interface (i.e. always,
+		  unless the PHY is accessed through a different bus).
 
 Example:
 	axi_ethernet_eth: ethernet@40c00000 {
 		compatible = "xlnx,axi-ethernet-1.00.a";
 		device_type = "network";
 		interrupt-parent = <&microblaze_0_axi_intc>;
-		interrupts = <2 0>;
+		interrupts = <2 0 1>;
+		clocks = <&axi_clk>;
 		phy-mode = "mii";
-		reg = <0x40c00000 0x40000>;
+		reg = <0x40c00000 0x40000 0x50c00000 0x40000>;
 		xlnx,rxcsum = <0x2>;
 		xlnx,rxmem = <0x800>;
 		xlnx,txcsum = <0x2>;
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index af96e05c5bcd..8d994cebb6b0 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -6,7 +6,7 @@
 config NET_VENDOR_XILINX
 	bool "Xilinx devices"
 	default y
-	depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || COMPILE_TEST
+	depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || ARM || COMPILE_TEST
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
 
@@ -26,8 +26,8 @@ config XILINX_EMACLITE
 
 config XILINX_AXI_EMAC
 	tristate "Xilinx 10/100/1000 AXI Ethernet support"
-	depends on MICROBLAZE
-	select PHYLIB
+	depends on MICROBLAZE || X86 || ARM || COMPILE_TEST
+	select PHYLINK
 	---help---
 	  This driver supports the 10/100/1000 Ethernet from Xilinx for the
 	  AXI bus interface used in Xilinx Virtex FPGAs.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 011adae32b89..2dacfc85b3ba 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/if_vlan.h>
+#include <linux/phylink.h>
 
 /* Packet size info */
 #define XAE_HDR_SIZE			14 /* Size of Ethernet header */
@@ -83,6 +84,8 @@
 #define XAXIDMA_CR_RUNSTOP_MASK	0x00000001 /* Start/stop DMA channel */
 #define XAXIDMA_CR_RESET_MASK	0x00000004 /* Reset DMA engine */
 
+#define XAXIDMA_SR_HALT_MASK	0x00000001 /* Indicates DMA channel halted */
+
 #define XAXIDMA_BD_NDESC_OFFSET		0x00 /* Next descriptor pointer */
 #define XAXIDMA_BD_BUFA_OFFSET		0x08 /* Buffer address */
 #define XAXIDMA_BD_CTRL_LEN_OFFSET	0x18 /* Control/buffer length */
@@ -356,9 +359,6 @@
  * @app2: MM2S/S2MM User Application Field 2.
  * @app3: MM2S/S2MM User Application Field 3.
  * @app4: MM2S/S2MM User Application Field 4.
- * @sw_id_offset: MM2S/S2MM Sw ID
- * @reserved5: Reserved and not used
- * @reserved6: Reserved and not used
  */
 struct axidma_bd {
 	u32 next;	/* Physical address of next buffer descriptor */
@@ -373,11 +373,9 @@ struct axidma_bd {
 	u32 app1;	/* TX start << 16 | insert */
 	u32 app2;	/* TX csum seed */
 	u32 app3;
-	u32 app4;
-	u32 sw_id_offset;
-	u32 reserved5;
-	u32 reserved6;
-};
+	u32 app4;	/* Last field used by HW */
+	struct sk_buff *skb;
+} __aligned(XAXIDMA_BD_MINIMUM_ALIGNMENT);
 
 /**
  * struct axienet_local - axienet private per device data
@@ -385,6 +383,7 @@ struct axidma_bd {
  * @dev: Pointer to device structure
  * @phy_node: Pointer to device node structure
  * @mii_bus: Pointer to MII bus structure
+ * @regs_start: Resource start for axienet device addresses
  * @regs: Base address for the axienet_local device address space
  * @dma_regs: Base address for the axidma device address space
  * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
@@ -422,10 +421,17 @@ struct axienet_local {
 	/* Connection to PHY device */
 	struct device_node *phy_node;
 
+	struct phylink *phylink;
+	struct phylink_config phylink_config;
+
+	/* Clock for AXI bus */
+	struct clk *clk;
+
 	/* MDIO bus data */
 	struct mii_bus *mii_bus;	/* MII bus reference */
 
 	/* IO registers, dma functions and IRQs */
+	resource_size_t regs_start;
 	void __iomem *regs;
 	void __iomem *dma_regs;
 
@@ -433,17 +439,19 @@ struct axienet_local {
 
 	int tx_irq;
 	int rx_irq;
+	int eth_irq;
 	phy_interface_t phy_mode;
 
 	u32 options;		/* Current options word */
-	u32 last_link;
 	u32 features;
 
 	/* Buffer descriptors */
 	struct axidma_bd *tx_bd_v;
 	dma_addr_t tx_bd_p;
+	u32 tx_bd_num;
 	struct axidma_bd *rx_bd_v;
 	dma_addr_t rx_bd_p;
+	u32 rx_bd_num;
 	u32 tx_bd_ci;
 	u32 tx_bd_tail;
 	u32 rx_bd_ci;
@@ -481,7 +489,7 @@ struct axienet_option {
  */
 static inline u32 axienet_ior(struct axienet_local *lp, off_t offset)
 {
-	return in_be32(lp->regs + offset);
+	return ioread32(lp->regs + offset);
 }
 
 static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
@@ -501,12 +509,13 @@ static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
 static inline void axienet_iow(struct axienet_local *lp, off_t offset,
 			       u32 value)
 {
-	out_be32((lp->regs + offset), value);
+	iowrite32(value, lp->regs + offset);
 }
 
 /* Function prototypes visible in xilinx_axienet_mdio.c for other files */
-int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np);
-int axienet_mdio_wait_until_ready(struct axienet_local *lp);
+int axienet_mdio_enable(struct axienet_local *lp);
+void axienet_mdio_disable(struct axienet_local *lp);
+int axienet_mdio_setup(struct axienet_local *lp);
 void axienet_mdio_teardown(struct axienet_local *lp);
 
 #endif /* XILINX_AXI_ENET_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 831967f6eff8..da420c881662 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -7,6 +7,7 @@
  * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
  * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
  * Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
  * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  *
  * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
@@ -21,6 +22,7 @@
  *  - Add support for extended VLAN support.
  */
 
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
 #include <linux/module.h>
@@ -38,16 +40,18 @@
 
 #include "xilinx_axienet.h"
 
-/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
-#define TX_BD_NUM		64
-#define RX_BD_NUM		128
+/* Descriptors defines for Tx and Rx DMA */
+#define TX_BD_NUM_DEFAULT		64
+#define RX_BD_NUM_DEFAULT		1024
+#define TX_BD_NUM_MAX			4096
+#define RX_BD_NUM_MAX			4096
 
 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
 #define DRIVER_NAME		"xaxienet"
 #define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
 #define DRIVER_VERSION		"1.00a"
 
-#define AXIENET_REGS_N		32
+#define AXIENET_REGS_N		40
 
 /* Match table for of_platform binding */
 static const struct of_device_id axienet_of_match[] = {
@@ -125,7 +129,7 @@ static struct axienet_option axienet_options[] = {
  */
 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
 {
-	return in_be32(lp->dma_regs + reg);
+	return ioread32(lp->dma_regs + reg);
 }
 
 /**
@@ -140,7 +144,7 @@ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
 static inline void axienet_dma_out32(struct axienet_local *lp,
 				     off_t reg, u32 value)
 {
-	out_be32((lp->dma_regs + reg), value);
+	iowrite32(value, lp->dma_regs + reg);
 }
 
 /**
@@ -156,22 +160,21 @@ static void axienet_dma_bd_release(struct net_device *ndev)
 	int i;
 	struct axienet_local *lp = netdev_priv(ndev);
 
-	for (i = 0; i < RX_BD_NUM; i++) {
+	for (i = 0; i < lp->rx_bd_num; i++) {
 		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
 				 lp->max_frm_size, DMA_FROM_DEVICE);
-		dev_kfree_skb((struct sk_buff *)
-			      (lp->rx_bd_v[i].sw_id_offset));
+		dev_kfree_skb(lp->rx_bd_v[i].skb);
 	}
 
 	if (lp->rx_bd_v) {
 		dma_free_coherent(ndev->dev.parent,
-				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 				  lp->rx_bd_v,
 				  lp->rx_bd_p);
 	}
 	if (lp->tx_bd_v) {
 		dma_free_coherent(ndev->dev.parent,
-				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 				  lp->tx_bd_v,
 				  lp->tx_bd_p);
 	}
@@ -201,33 +204,33 @@ static int axienet_dma_bd_init(struct net_device *ndev)
 
 	/* Allocate the Tx and Rx buffer descriptors. */
 	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
-					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 					 &lp->tx_bd_p, GFP_KERNEL);
 	if (!lp->tx_bd_v)
 		goto out;
 
 	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
-					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 					 &lp->rx_bd_p, GFP_KERNEL);
 	if (!lp->rx_bd_v)
 		goto out;
 
-	for (i = 0; i < TX_BD_NUM; i++) {
+	for (i = 0; i < lp->tx_bd_num; i++) {
 		lp->tx_bd_v[i].next = lp->tx_bd_p +
 				      sizeof(*lp->tx_bd_v) *
-				      ((i + 1) % TX_BD_NUM);
+				      ((i + 1) % lp->tx_bd_num);
 	}
 
-	for (i = 0; i < RX_BD_NUM; i++) {
+	for (i = 0; i < lp->rx_bd_num; i++) {
 		lp->rx_bd_v[i].next = lp->rx_bd_p +
 				      sizeof(*lp->rx_bd_v) *
-				      ((i + 1) % RX_BD_NUM);
+				      ((i + 1) % lp->rx_bd_num);
 
 		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 		if (!skb)
 			goto out;
 
-		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
+		lp->rx_bd_v[i].skb = skb;
 		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
 						     skb->data,
 						     lp->max_frm_size,
@@ -269,7 +272,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
 			  cr | XAXIDMA_CR_RUNSTOP_MASK);
 	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
-			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+			  (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
 
 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 	 * Tx channel is now ready to run. But only after we write to the
@@ -434,17 +437,20 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
 	lp->options |= options;
 }
 
-static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
+static void __axienet_device_reset(struct axienet_local *lp)
 {
 	u32 timeout;
 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 	 * process of Axi DMA takes a while to complete as all pending
 	 * commands/transfers will be flushed or completed during this
 	 * reset process.
+	 * Note that even though both TX and RX have their own reset register,
+	 * they both reset the entire DMA core, so only one needs to be used.
 	 */
-	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
+	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
 	timeout = DELAY_OF_ONE_MILLISEC;
-	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
+	while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
+	       XAXIDMA_CR_RESET_MASK) {
 		udelay(1);
 		if (--timeout == 0) {
 			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
@@ -470,8 +476,7 @@ static void axienet_device_reset(struct net_device *ndev)
 	u32 axienet_status;
 	struct axienet_local *lp = netdev_priv(ndev);
 
-	__axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
-	__axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
+	__axienet_device_reset(lp);
 
 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 	lp->options |= XAE_OPTION_VLAN;
@@ -498,6 +503,8 @@ static void axienet_device_reset(struct net_device *ndev)
 	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
 	if (axienet_status & XAE_INT_RXRJECT_MASK)
 		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
+		    XAE_INT_RECV_ERROR_MASK : 0);
 
 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 
@@ -514,63 +521,6 @@ static void axienet_device_reset(struct net_device *ndev)
 }
 
 /**
- * axienet_adjust_link - Adjust the PHY link speed/duplex.
- * @ndev:	Pointer to the net_device structure
- *
- * This function is called to change the speed and duplex setting after
- * auto negotiation is done by the PHY. This is the function that gets
- * registered with the PHY interface through the "of_phy_connect" call.
- */
-static void axienet_adjust_link(struct net_device *ndev)
-{
-	u32 emmc_reg;
-	u32 link_state;
-	u32 setspeed = 1;
-	struct axienet_local *lp = netdev_priv(ndev);
-	struct phy_device *phy = ndev->phydev;
-
-	link_state = phy->speed | (phy->duplex << 1) | phy->link;
-	if (lp->last_link != link_state) {
-		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
-			if (lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX)
-				setspeed = 0;
-		} else {
-			if ((phy->speed == SPEED_1000) &&
-			    (lp->phy_mode == PHY_INTERFACE_MODE_MII))
-				setspeed = 0;
-		}
-
-		if (setspeed == 1) {
-			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
-			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
-
-			switch (phy->speed) {
-			case SPEED_1000:
-				emmc_reg |= XAE_EMMC_LINKSPD_1000;
-				break;
-			case SPEED_100:
-				emmc_reg |= XAE_EMMC_LINKSPD_100;
-				break;
-			case SPEED_10:
-				emmc_reg |= XAE_EMMC_LINKSPD_10;
-				break;
-			default:
-				dev_err(&ndev->dev, "Speed other than 10, 100 "
-					"or 1Gbps is not supported\n");
-				break;
-			}
-
-			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
-			lp->last_link = link_state;
-			phy_print_status(phy);
-		} else {
-			netdev_err(ndev,
-				   "Error setting Axi Ethernet mac speed\n");
-		}
-	}
-}
-
-/**
  * axienet_start_xmit_done - Invoked once a transmit is completed by the
  * Axi DMA Tx channel.
  * @ndev:	Pointer to the net_device structure
@@ -595,26 +545,31 @@ static void axienet_start_xmit_done(struct net_device *ndev)
 		dma_unmap_single(ndev->dev.parent, cur_p->phys,
 				(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 				DMA_TO_DEVICE);
-		if (cur_p->app4)
-			dev_consume_skb_irq((struct sk_buff *)cur_p->app4);
+		if (cur_p->skb)
+			dev_consume_skb_irq(cur_p->skb);
 		/*cur_p->phys = 0;*/
 		cur_p->app0 = 0;
 		cur_p->app1 = 0;
 		cur_p->app2 = 0;
 		cur_p->app4 = 0;
 		cur_p->status = 0;
+		cur_p->skb = NULL;
 
 		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
 		packets++;
 
-		++lp->tx_bd_ci;
-		lp->tx_bd_ci %= TX_BD_NUM;
+		if (++lp->tx_bd_ci >= lp->tx_bd_num)
+			lp->tx_bd_ci = 0;
 		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
 		status = cur_p->status;
 	}
 
 	ndev->stats.tx_packets += packets;
 	ndev->stats.tx_bytes += size;
+
+	/* Matches barrier in axienet_start_xmit */
+	smp_mb();
+
 	netif_wake_queue(ndev);
 }
 
@@ -635,7 +590,7 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 					    int num_frag)
 {
 	struct axidma_bd *cur_p;
-	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
+	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
 	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
 		return NETDEV_TX_BUSY;
 	return 0;
@@ -670,9 +625,19 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 
 	if (axienet_check_tx_bd_space(lp, num_frag)) {
-		if (!netif_queue_stopped(ndev))
-			netif_stop_queue(ndev);
-		return NETDEV_TX_BUSY;
+		if (netif_queue_stopped(ndev))
+			return NETDEV_TX_BUSY;
+
+		netif_stop_queue(ndev);
+
+		/* Matches barrier in axienet_start_xmit_done */
+		smp_mb();
+
+		/* Space might have just been freed - check again */
+		if (axienet_check_tx_bd_space(lp, num_frag))
+			return NETDEV_TX_BUSY;
+
+		netif_wake_queue(ndev);
 	}
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -695,8 +660,8 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 					     skb_headlen(skb), DMA_TO_DEVICE);
 
 	for (ii = 0; ii < num_frag; ii++) {
-		++lp->tx_bd_tail;
-		lp->tx_bd_tail %= TX_BD_NUM;
+		if (++lp->tx_bd_tail >= lp->tx_bd_num)
+			lp->tx_bd_tail = 0;
 		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 		frag = &skb_shinfo(skb)->frags[ii];
 		cur_p->phys = dma_map_single(ndev->dev.parent,
@@ -707,13 +672,13 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 
 	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
-	cur_p->app4 = (unsigned long)skb;
+	cur_p->skb = skb;
 
 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 	/* Start the transfer */
 	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
-	++lp->tx_bd_tail;
-	lp->tx_bd_tail %= TX_BD_NUM;
+	if (++lp->tx_bd_tail >= lp->tx_bd_num)
+		lp->tx_bd_tail = 0;
 
 	return NETDEV_TX_OK;
 }
@@ -742,13 +707,15 @@ static void axienet_recv(struct net_device *ndev)
 
 	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
 		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
-		skb = (struct sk_buff *) (cur_p->sw_id_offset);
-		length = cur_p->app4 & 0x0000FFFF;
 
 		dma_unmap_single(ndev->dev.parent, cur_p->phys,
 				 lp->max_frm_size,
 				 DMA_FROM_DEVICE);
 
+		skb = cur_p->skb;
+		cur_p->skb = NULL;
+		length = cur_p->app4 & 0x0000FFFF;
+
 		skb_put(skb, length);
 		skb->protocol = eth_type_trans(skb, ndev);
 		/*skb_checksum_none_assert(skb);*/
@@ -783,10 +750,10 @@ static void axienet_recv(struct net_device *ndev)
 						     DMA_FROM_DEVICE);
 		cur_p->cntrl = lp->max_frm_size;
 		cur_p->status = 0;
-		cur_p->sw_id_offset = (u32) new_skb;
+		cur_p->skb = new_skb;
 
-		++lp->rx_bd_ci;
-		lp->rx_bd_ci %= RX_BD_NUM;
+		if (++lp->rx_bd_ci >= lp->rx_bd_num)
+			lp->rx_bd_ci = 0;
 		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 	}
 
@@ -802,7 +769,7 @@ static void axienet_recv(struct net_device *ndev)
  * @irq:	irq number
  * @_ndev:	net_device pointer
  *
- * Return: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
  *
  * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
  * to complete the BD processing.
@@ -821,7 +788,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
 		goto out;
 	}
 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
-		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
+		return IRQ_NONE;
 	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
 		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
@@ -851,7 +818,7 @@ out:
  * @irq:	irq number
  * @_ndev:	net_device pointer
  *
- * Return: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
  *
  * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
  * processing.
@@ -870,7 +837,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
 		goto out;
 	}
 	if (!(status & XAXIDMA_IRQ_ALL_MASK))
-		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
+		return IRQ_NONE;
 	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
 		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
@@ -895,6 +862,35 @@ out:
 	return IRQ_HANDLED;
 }
 
+/**
+ * axienet_eth_irq - Ethernet core Isr.
+ * @irq:	irq number
+ * @_ndev:	net_device pointer
+ *
+ * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
+ *
+ * Handle miscellaneous conditions indicated by Ethernet core IRQ.
+ */
+static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
+{
+	struct net_device *ndev = _ndev;
+	struct axienet_local *lp = netdev_priv(ndev);
+	unsigned int pending;
+
+	pending = axienet_ior(lp, XAE_IP_OFFSET);
+	if (!pending)
+		return IRQ_NONE;
+
+	if (pending & XAE_INT_RXFIFOOVR_MASK)
+		ndev->stats.rx_missed_errors++;
+
+	if (pending & XAE_INT_RXRJECT_MASK)
+		ndev->stats.rx_frame_errors++;
+
+	axienet_iow(lp, XAE_IS_OFFSET, pending);
+	return IRQ_HANDLED;
+}
+
 static void axienet_dma_err_handler(unsigned long data);
 
 /**
@@ -904,67 +900,72 @@ static void axienet_dma_err_handler(unsigned long data);
  * Return: 0, on success.
  *	    non-zero error value on failure
  *
- * This is the driver open routine. It calls phy_start to start the PHY device.
+ * This is the driver open routine. It calls phylink_start to start the
+ * PHY device.
  * It also allocates interrupt service routines, enables the interrupt lines
  * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
  * descriptors are initialized.
  */
 static int axienet_open(struct net_device *ndev)
 {
-	int ret, mdio_mcreg;
+	int ret;
 	struct axienet_local *lp = netdev_priv(ndev);
-	struct phy_device *phydev = NULL;
 
 	dev_dbg(&ndev->dev, "axienet_open()\n");
 
-	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
-	ret = axienet_mdio_wait_until_ready(lp);
-	if (ret < 0)
-		return ret;
 	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
 	 * When we do an Axi Ethernet reset, it resets the complete core
-	 * including the MDIO. If MDIO is not disabled when the reset
-	 * process is started, MDIO will be broken afterwards.
+	 * including the MDIO. MDIO must be disabled before resetting
+	 * and re-enabled afterwards.
+	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
 	 */
-	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
-		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
+	mutex_lock(&lp->mii_bus->mdio_lock);
+	axienet_mdio_disable(lp);
 	axienet_device_reset(ndev);
-	/* Enable the MDIO */
-	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
-	ret = axienet_mdio_wait_until_ready(lp);
+	ret = axienet_mdio_enable(lp);
+	mutex_unlock(&lp->mii_bus->mdio_lock);
 	if (ret < 0)
 		return ret;
 
-	if (lp->phy_node) {
-		phydev = of_phy_connect(lp->ndev, lp->phy_node,
-					axienet_adjust_link, 0, lp->phy_mode);
-
-		if (!phydev)
-			dev_err(lp->dev, "of_phy_connect() failed\n");
-		else
-			phy_start(phydev);
+	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
+	if (ret) {
+		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
+		return ret;
 	}
 
+	phylink_start(lp->phylink);
+
 	/* Enable tasklets for Axi DMA error handling */
 	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
 		     (unsigned long) lp);
 
 	/* Enable interrupts for Axi DMA Tx */
-	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
+	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
+			  ndev->name, ndev);
 	if (ret)
 		goto err_tx_irq;
 	/* Enable interrupts for Axi DMA Rx */
-	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
+	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
+			  ndev->name, ndev);
 	if (ret)
 		goto err_rx_irq;
+	/* Enable interrupts for Axi Ethernet core (if defined) */
+	if (lp->eth_irq > 0) {
+		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
+				  ndev->name, ndev);
+		if (ret)
+			goto err_eth_irq;
+	}
 
 	return 0;
 
+err_eth_irq:
+	free_irq(lp->rx_irq, ndev);
 err_rx_irq:
 	free_irq(lp->tx_irq, ndev);
 err_tx_irq:
-	if (phydev)
-		phy_disconnect(phydev);
+	phylink_stop(lp->phylink);
+	phylink_disconnect_phy(lp->phylink);
 	tasklet_kill(&lp->dma_err_tasklet);
 	dev_err(lp->dev, "request_irq() failed\n");
 	return ret;
@@ -976,34 +977,61 @@ err_tx_irq:
  *
  * Return: 0, on success.
  *
- * This is the driver stop routine. It calls phy_disconnect to stop the PHY
+ * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
  * device. It also removes the interrupt handlers and disables the interrupts.
  * The Axi DMA Tx/Rx BDs are released.
  */
 static int axienet_stop(struct net_device *ndev)
 {
-	u32 cr;
+	u32 cr, sr;
+	int count;
 	struct axienet_local *lp = netdev_priv(ndev);
 
 	dev_dbg(&ndev->dev, "axienet_close()\n");
 
-	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
-	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
-			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
-	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
-	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
-			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+	phylink_stop(lp->phylink);
+	phylink_disconnect_phy(lp->phylink);
+
 	axienet_setoptions(ndev, lp->options &
 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 
+	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+	axienet_iow(lp, XAE_IE_OFFSET, 0);
+
+	/* Give DMAs a chance to halt gracefully */
+	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+		msleep(20);
+		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+	}
+
+	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+		msleep(20);
+		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+	}
+
+	/* Do a reset to ensure DMA is really stopped */
+	mutex_lock(&lp->mii_bus->mdio_lock);
+	axienet_mdio_disable(lp);
+	__axienet_device_reset(lp);
+	axienet_mdio_enable(lp);
+	mutex_unlock(&lp->mii_bus->mdio_lock);
+
 	tasklet_kill(&lp->dma_err_tasklet);
 
+	if (lp->eth_irq > 0)
+		free_irq(lp->eth_irq, ndev);
 	free_irq(lp->tx_irq, ndev);
 	free_irq(lp->rx_irq, ndev);
 
-	if (ndev->phydev)
-		phy_disconnect(ndev->phydev);
-
 	axienet_dma_bd_release(ndev);
 	return 0;
 }
@@ -1151,6 +1179,48 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
 	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
 	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
 	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
+	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
+	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
+	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
+	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+}
+
+static void axienet_ethtools_get_ringparam(struct net_device *ndev,
+					   struct ethtool_ringparam *ering)
+{
+	struct axienet_local *lp = netdev_priv(ndev);
+
+	ering->rx_max_pending = RX_BD_NUM_MAX;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+	ering->tx_max_pending = TX_BD_NUM_MAX;
+	ering->rx_pending = lp->rx_bd_num;
+	ering->rx_mini_pending = 0;
+	ering->rx_jumbo_pending = 0;
+	ering->tx_pending = lp->tx_bd_num;
+}
+
+static int axienet_ethtools_set_ringparam(struct net_device *ndev,
+					  struct ethtool_ringparam *ering)
+{
+	struct axienet_local *lp = netdev_priv(ndev);
+
+	if (ering->rx_pending > RX_BD_NUM_MAX ||
+	    ering->rx_mini_pending ||
+	    ering->rx_jumbo_pending ||
+	    ering->rx_pending > TX_BD_NUM_MAX)
+		return -EINVAL;
+
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	lp->rx_bd_num = ering->rx_pending;
+	lp->tx_bd_num = ering->tx_pending;
+	return 0;
 }
 
 /**
@@ -1166,12 +1236,9 @@ static void
 axienet_ethtools_get_pauseparam(struct net_device *ndev,
 				struct ethtool_pauseparam *epauseparm)
 {
-	u32 regval;
 	struct axienet_local *lp = netdev_priv(ndev);
-	epauseparm->autoneg  = 0;
-	regval = axienet_ior(lp, XAE_FCC_OFFSET);
-	epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
-	epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
+
+	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
 }
 
 /**
@@ -1190,27 +1257,9 @@ static int
 axienet_ethtools_set_pauseparam(struct net_device *ndev,
 				struct ethtool_pauseparam *epauseparm)
 {
-	u32 regval = 0;
 	struct axienet_local *lp = netdev_priv(ndev);
 
-	if (netif_running(ndev)) {
-		netdev_err(ndev,
-			   "Please stop netif before applying configuration\n");
-		return -EFAULT;
-	}
-
-	regval = axienet_ior(lp, XAE_FCC_OFFSET);
-	if (epauseparm->tx_pause)
-		regval |= XAE_FCC_FCTX_MASK;
-	else
-		regval &= ~XAE_FCC_FCTX_MASK;
-	if (epauseparm->rx_pause)
-		regval |= XAE_FCC_FCRX_MASK;
-	else
-		regval &= ~XAE_FCC_FCRX_MASK;
-	axienet_iow(lp, XAE_FCC_OFFSET, regval);
-
-	return 0;
+	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
 }
 
 /**
@@ -1289,17 +1338,170 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
 	return 0;
 }
 
+static int
+axienet_ethtools_get_link_ksettings(struct net_device *ndev,
+				    struct ethtool_link_ksettings *cmd)
+{
+	struct axienet_local *lp = netdev_priv(ndev);
+
+	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
+}
+
+static int
+axienet_ethtools_set_link_ksettings(struct net_device *ndev,
+				    const struct ethtool_link_ksettings *cmd)
+{
+	struct axienet_local *lp = netdev_priv(ndev);
+
+	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
+}
+
 static const struct ethtool_ops axienet_ethtool_ops = {
 	.get_drvinfo    = axienet_ethtools_get_drvinfo,
 	.get_regs_len   = axienet_ethtools_get_regs_len,
 	.get_regs       = axienet_ethtools_get_regs,
 	.get_link       = ethtool_op_get_link,
+	.get_ringparam	= axienet_ethtools_get_ringparam,
+	.set_ringparam	= axienet_ethtools_set_ringparam,
 	.get_pauseparam = axienet_ethtools_get_pauseparam,
 	.set_pauseparam = axienet_ethtools_set_pauseparam,
 	.get_coalesce   = axienet_ethtools_get_coalesce,
 	.set_coalesce   = axienet_ethtools_set_coalesce,
-	.get_link_ksettings = phy_ethtool_get_link_ksettings,
-	.set_link_ksettings = phy_ethtool_set_link_ksettings,
+	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
+	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
+};
+
+static void axienet_validate(struct phylink_config *config,
+			     unsigned long *supported,
+			     struct phylink_link_state *state)
+{
+	struct net_device *ndev = to_net_dev(config->dev);
+	struct axienet_local *lp = netdev_priv(ndev);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+	/* Only support the mode we are configured for */
+	if (state->interface != PHY_INTERFACE_MODE_NA &&
+	    state->interface != lp->phy_mode) {
+		netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
+			    phy_modes(state->interface),
+			    phy_modes(lp->phy_mode));
+		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+		return;
+	}
+
+	phylink_set(mask, Autoneg);
+	phylink_set_port_modes(mask);
+
+	phylink_set(mask, Asym_Pause);
+	phylink_set(mask, Pause);
+	phylink_set(mask, 1000baseX_Full);
+	phylink_set(mask, 10baseT_Full);
+	phylink_set(mask, 100baseT_Full);
+	phylink_set(mask, 1000baseT_Full);
+
+	bitmap_and(supported, supported, mask,
+		   __ETHTOOL_LINK_MODE_MASK_NBITS);
+	bitmap_and(state->advertising, state->advertising, mask,
+		   __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int axienet_mac_link_state(struct phylink_config *config,
+				  struct phylink_link_state *state)
+{
+	struct net_device *ndev = to_net_dev(config->dev);
+	struct axienet_local *lp = netdev_priv(ndev);
+	u32 emmc_reg, fcc_reg;
+
+	state->interface = lp->phy_mode;
+
+	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
+	if (emmc_reg & XAE_EMMC_LINKSPD_1000)
+		state->speed = SPEED_1000;
+	else if (emmc_reg & XAE_EMMC_LINKSPD_100)
+		state->speed = SPEED_100;
+	else
+		state->speed = SPEED_10;
+
+	state->pause = 0;
+	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
+	if (fcc_reg & XAE_FCC_FCTX_MASK)
+		state->pause |= MLO_PAUSE_TX;
+	if (fcc_reg & XAE_FCC_FCRX_MASK)
+		state->pause |= MLO_PAUSE_RX;
+
+	state->an_complete = 0;
+	state->duplex = 1;
+
+	return 1;
+}
+
+static void axienet_mac_an_restart(struct phylink_config *config)
+{
+	/* Unsupported, do nothing */
+}
+
+static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
+			       const struct phylink_link_state *state)
+{
+	struct net_device *ndev = to_net_dev(config->dev);
+	struct axienet_local *lp = netdev_priv(ndev);
+	u32 emmc_reg, fcc_reg;
+
+	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
+	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
+
+	switch (state->speed) {
+	case SPEED_1000:
+		emmc_reg |= XAE_EMMC_LINKSPD_1000;
+		break;
+	case SPEED_100:
+		emmc_reg |= XAE_EMMC_LINKSPD_100;
+		break;
+	case SPEED_10:
+		emmc_reg |= XAE_EMMC_LINKSPD_10;
+		break;
+	default:
+		dev_err(&ndev->dev,
+			"Speed other than 10, 100 or 1Gbps is not supported\n");
+		break;
+	}
+
+	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
+
+	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
+	if (state->pause & MLO_PAUSE_TX)
+		fcc_reg |= XAE_FCC_FCTX_MASK;
+	else
+		fcc_reg &= ~XAE_FCC_FCTX_MASK;
+	if (state->pause & MLO_PAUSE_RX)
+		fcc_reg |= XAE_FCC_FCRX_MASK;
+	else
+		fcc_reg &= ~XAE_FCC_FCRX_MASK;
+	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
+}
+
+static void axienet_mac_link_down(struct phylink_config *config,
+				  unsigned int mode,
+				  phy_interface_t interface)
+{
+	/* nothing meaningful to do */
+}
+
+static void axienet_mac_link_up(struct phylink_config *config,
+				unsigned int mode,
+				phy_interface_t interface,
+				struct phy_device *phy)
+{
+	/* nothing meaningful to do */
+}
+
+static const struct phylink_mac_ops axienet_phylink_ops = {
+	.validate = axienet_validate,
+	.mac_link_state = axienet_mac_link_state,
+	.mac_an_restart = axienet_mac_an_restart,
+	.mac_config = axienet_mac_config,
+	.mac_link_down = axienet_mac_link_down,
+	.mac_link_up = axienet_mac_link_up,
 };
 
 /**
@@ -1313,38 +1515,33 @@ static void axienet_dma_err_handler(unsigned long data)
 {
 	u32 axienet_status;
 	u32 cr, i;
-	int mdio_mcreg;
 	struct axienet_local *lp = (struct axienet_local *) data;
 	struct net_device *ndev = lp->ndev;
 	struct axidma_bd *cur_p;
 
 	axienet_setoptions(ndev, lp->options &
 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
-	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
-	axienet_mdio_wait_until_ready(lp);
 	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
 	 * When we do an Axi Ethernet reset, it resets the complete core
-	 * including the MDIO. So if MDIO is not disabled when the reset
-	 * process is started, MDIO will be broken afterwards.
+	 * including the MDIO. MDIO must be disabled before resetting
+	 * and re-enabled afterwards.
+	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
 	 */
-	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
-		    ~XAE_MDIO_MC_MDIOEN_MASK));
-
-	__axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
-	__axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
-
-	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
-	axienet_mdio_wait_until_ready(lp);
+	mutex_lock(&lp->mii_bus->mdio_lock);
+	axienet_mdio_disable(lp);
+	__axienet_device_reset(lp);
+	axienet_mdio_enable(lp);
+	mutex_unlock(&lp->mii_bus->mdio_lock);
 
-	for (i = 0; i < TX_BD_NUM; i++) {
+	for (i = 0; i < lp->tx_bd_num; i++) {
 		cur_p = &lp->tx_bd_v[i];
 		if (cur_p->phys)
 			dma_unmap_single(ndev->dev.parent, cur_p->phys,
 					 (cur_p->cntrl &
 					  XAXIDMA_BD_CTRL_LENGTH_MASK),
 					 DMA_TO_DEVICE);
-		if (cur_p->app4)
-			dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
+		if (cur_p->skb)
+			dev_kfree_skb_irq(cur_p->skb);
 		cur_p->phys = 0;
 		cur_p->cntrl = 0;
 		cur_p->status = 0;
@@ -1353,10 +1550,10 @@ static void axienet_dma_err_handler(unsigned long data)
 		cur_p->app2 = 0;
 		cur_p->app3 = 0;
 		cur_p->app4 = 0;
-		cur_p->sw_id_offset = 0;
+		cur_p->skb = NULL;
 	}
 
-	for (i = 0; i < RX_BD_NUM; i++) {
+	for (i = 0; i < lp->rx_bd_num; i++) {
 		cur_p = &lp->rx_bd_v[i];
 		cur_p->status = 0;
 		cur_p->app0 = 0;
@@ -1404,7 +1601,7 @@ static void axienet_dma_err_handler(unsigned long data)
 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
 			  cr | XAXIDMA_CR_RUNSTOP_MASK);
 	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
-			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+			  (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
 
 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 	 * Tx channel is now ready to run. But only after we write to the
@@ -1422,6 +1619,8 @@ static void axienet_dma_err_handler(unsigned long data)
 	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
 	if (axienet_status & XAE_INT_RXRJECT_MASK)
 		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
+		    XAE_INT_RECV_ERROR_MASK : 0);
 	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 
 	/* Sync default options with HW but leave receiver and
@@ -1453,7 +1652,7 @@ static int axienet_probe(struct platform_device *pdev)
 	struct axienet_local *lp;
 	struct net_device *ndev;
 	const void *mac_addr;
-	struct resource *ethres, dmares;
+	struct resource *ethres;
 	u32 value;
 
 	ndev = alloc_etherdev(sizeof(*lp));
@@ -1476,8 +1675,11 @@ static int axienet_probe(struct platform_device *pdev)
 	lp->ndev = ndev;
 	lp->dev = &pdev->dev;
 	lp->options = XAE_OPTION_DEFAULTS;
+	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
 	/* Map device registers */
 	ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	lp->regs_start = ethres->start;
 	lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
 	if (IS_ERR(lp->regs)) {
 		dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
@@ -1568,38 +1770,57 @@ static int axienet_probe(struct platform_device *pdev)
 
 	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
 	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
-	if (!np) {
-		dev_err(&pdev->dev, "could not find DMA node\n");
-		ret = -ENODEV;
-		goto free_netdev;
-	}
-	ret = of_address_to_resource(np, 0, &dmares);
-	if (ret) {
-		dev_err(&pdev->dev, "unable to get DMA resource\n");
+	if (np) {
+		struct resource dmares;
+
+		ret = of_address_to_resource(np, 0, &dmares);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"unable to get DMA resource\n");
+			of_node_put(np);
+			goto free_netdev;
+		}
+		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
+						     &dmares);
+		lp->rx_irq = irq_of_parse_and_map(np, 1);
+		lp->tx_irq = irq_of_parse_and_map(np, 0);
 		of_node_put(np);
-		goto free_netdev;
+		lp->eth_irq = platform_get_irq(pdev, 0);
+	} else {
+		/* Check for these resources directly on the Ethernet node. */
+		struct resource *res = platform_get_resource(pdev,
+							     IORESOURCE_MEM, 1);
+		if (!res) {
+			dev_err(&pdev->dev,
+				"unable to get DMA memory resource\n");
+			goto free_netdev;
+		}
+		lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
+		lp->rx_irq = platform_get_irq(pdev, 1);
+		lp->tx_irq = platform_get_irq(pdev, 0);
+		lp->eth_irq = platform_get_irq(pdev, 2);
 	}
-	lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
 	if (IS_ERR(lp->dma_regs)) {
 		dev_err(&pdev->dev, "could not map DMA regs\n");
 		ret = PTR_ERR(lp->dma_regs);
-		of_node_put(np);
 		goto free_netdev;
 	}
-	lp->rx_irq = irq_of_parse_and_map(np, 1);
-	lp->tx_irq = irq_of_parse_and_map(np, 0);
-	of_node_put(np);
 	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
 		dev_err(&pdev->dev, "could not determine irqs\n");
 		ret = -ENOMEM;
 		goto free_netdev;
 	}
 
+	/* Check for Ethernet core IRQ (optional) */
+	if (lp->eth_irq <= 0)
+		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
+
 	/* Retrieve the MAC address */
 	mac_addr = of_get_mac_address(pdev->dev.of_node);
 	if (IS_ERR(mac_addr)) {
-		dev_err(&pdev->dev, "could not find MAC address\n");
-		goto free_netdev;
+		dev_warn(&pdev->dev, "could not find MAC address property: %ld\n",
+			 PTR_ERR(mac_addr));
+		mac_addr = NULL;
 	}
 	axienet_set_mac_address(ndev, mac_addr);
 
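With the axistream-connected path, the DMA IRQs come from the referenced node and only the core IRQ is taken from the Ethernet node (index 0); without it, indices 0/1 are the TX/RX DMA IRQs and index 2 the optional core IRQ. A minimal sketch of the optional-IRQ idiom used here (request_optional_eth_irq is a hypothetical helper, not driver code):

	/* platform_get_irq() returns a negative errno when the interrupt
	 * is absent, so anything <= 0 is treated as "not wired up" rather
	 * than a probe failure.
	 */
	static int request_optional_eth_irq(struct platform_device *pdev,
					    irq_handler_t handler, void *dev_id)
	{
		int irq = platform_get_irq(pdev, 2);	/* index 2: core IRQ */

		if (irq <= 0)
			return 0;	/* optional, continue without it */

		return devm_request_irq(&pdev->dev, irq, handler, 0,
					dev_name(&pdev->dev), dev_id);
	}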
@@ -1608,9 +1829,36 @@ static int axienet_probe(struct platform_device *pdev)
 
 	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
 	if (lp->phy_node) {
-		ret = axienet_mdio_setup(lp, pdev->dev.of_node);
+		lp->clk = devm_clk_get(&pdev->dev, NULL);
+		if (IS_ERR(lp->clk)) {
+			dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
+				 PTR_ERR(lp->clk));
+			lp->clk = NULL;
+		} else {
+			ret = clk_prepare_enable(lp->clk);
+			if (ret) {
+				dev_err(&pdev->dev, "Unable to enable clock: %d\n",
+					ret);
+				goto free_netdev;
+			}
+		}
+
+		ret = axienet_mdio_setup(lp);
 		if (ret)
-			dev_warn(&pdev->dev, "error registering MDIO bus\n");
+			dev_warn(&pdev->dev,
+				 "error registering MDIO bus: %d\n", ret);
+	}
+
+	lp->phylink_config.dev = &ndev->dev;
+	lp->phylink_config.type = PHYLINK_NETDEV;
+
+	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
+				     lp->phy_mode,
+				     &axienet_phylink_ops);
+	if (IS_ERR(lp->phylink)) {
+		ret = PTR_ERR(lp->phylink);
+		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
+		goto free_netdev;
 	}
 
 	ret = register_netdev(lp->ndev);
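phylink_create() only instantiates the link manager; attaching and starting it happens in the net_device open/stop paths. A sketch of the usual call sequence with illustrative ndo callbacks (the function names are hypothetical; the driver's real open/stop code is outside this hunk):

	static int example_open(struct net_device *ndev)
	{
		struct axienet_local *lp = netdev_priv(ndev);
		int ret;

		/* Attach the PHY named by the "phy-handle" DT property */
		ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
		if (ret)
			return ret;
		phylink_start(lp->phylink);	/* begin link management */
		return 0;
	}

	static int example_stop(struct net_device *ndev)
	{
		struct axienet_local *lp = netdev_priv(ndev);

		phylink_stop(lp->phylink);
		phylink_disconnect_phy(lp->phylink);
		return 0;
	}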
@@ -1632,9 +1880,16 @@ static int axienet_remove(struct platform_device *pdev)
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct axienet_local *lp = netdev_priv(ndev);
 
-	axienet_mdio_teardown(lp);
 	unregister_netdev(ndev);
 
+	if (lp->phylink)
+		phylink_destroy(lp->phylink);
+
+	axienet_mdio_teardown(lp);
+
+	if (lp->clk)
+		clk_disable_unprepare(lp->clk);
+
 	of_node_put(lp->phy_node);
 	lp->phy_node = NULL;
 
@@ -1643,9 +1898,23 @@ static int axienet_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static void axienet_shutdown(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+
+	rtnl_lock();
+	netif_device_detach(ndev);
+
+	if (netif_running(ndev))
+		dev_close(ndev);
+
+	rtnl_unlock();
+}
+
 static struct platform_driver axienet_driver = {
 	.probe = axienet_probe,
 	.remove = axienet_remove,
+	.shutdown = axienet_shutdown,
 	.driver = {
 		.name = "xilinx_axienet",
 		.of_match_table = axienet_of_match,
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 704babdbc8a2..435ed308d990 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -5,9 +5,11 @@
  * Copyright (c) 2009 Secret Lab Technologies, Ltd.
  * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
  * Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
  * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
  */
 
+#include <linux/clk.h>
 #include <linux/of_address.h>
 #include <linux/of_mdio.h>
 #include <linux/jiffies.h>
@@ -16,10 +18,10 @@
 #include "xilinx_axienet.h"
 
 #define MAX_MDIO_FREQ	  2500000 /* 2.5 MHz */
-#define DEFAULT_CLOCK_DIVISOR XAE_MDIO_DIV_DFT
+#define DEFAULT_HOST_CLOCK 150000000 /* 150 MHz */
 
 /* Wait till MDIO interface is ready to accept a new transaction.*/
-int axienet_mdio_wait_until_ready(struct axienet_local *lp)
+static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
 {
 	u32 val;
 
@@ -112,23 +114,42 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
 }
 
 /**
- * axienet_mdio_setup - MDIO setup function
+ * axienet_mdio_enable - MDIO hardware setup function
  * @lp: Pointer to axienet local data structure.
- * @np: Pointer to device node
  *
- * Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
- * mdiobus_alloc (to allocate memory for mii bus structure) fails.
+ * Return: 0 on success, -ETIMEDOUT on a timeout.
  *
  * Sets up the MDIO interface by initializing the MDIO clock and enabling the
- * MDIO interface in hardware. Register the MDIO interface.
+ * MDIO interface in hardware.
  **/
-int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
+int axienet_mdio_enable(struct axienet_local *lp)
 {
-	int ret;
 	u32 clk_div, host_clock;
-	struct mii_bus *bus;
-	struct resource res;
-	struct device_node *np1;
+
+	if (lp->clk) {
+		host_clock = clk_get_rate(lp->clk);
+	} else {
+		struct device_node *np1;
+
+		/* Legacy fallback: detect CPU clock frequency and use as AXI
+		 * bus clock frequency. This only works on certain platforms.
+		 */
+		np1 = of_find_node_by_name(NULL, "cpu");
+		if (!np1) {
+			netdev_warn(lp->ndev, "Could not find CPU device node.\n");
+			host_clock = DEFAULT_HOST_CLOCK;
+		} else {
+			int ret = of_property_read_u32(np1, "clock-frequency",
+						       &host_clock);
+			if (ret) {
+				netdev_warn(lp->ndev, "CPU clock-frequency property not found.\n");
+				host_clock = DEFAULT_HOST_CLOCK;
+			}
+			of_node_put(np1);
+		}
+		netdev_info(lp->ndev, "Setting assumed host clock to %u\n",
+			    host_clock);
+	}
 
 	/* clk_div can be calculated by deriving it from the equation:
 	 * fMDIO = fHOST / ((1 + clk_div) * 2)
@@ -155,25 +176,6 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
 	 * "clock-frequency" from the CPU
 	 */
 
-	np1 = of_find_node_by_name(NULL, "cpu");
-	if (!np1) {
-		netdev_warn(lp->ndev, "Could not find CPU device node.\n");
-		netdev_warn(lp->ndev,
-			    "Setting MDIO clock divisor to default %d\n",
-			    DEFAULT_CLOCK_DIVISOR);
-		clk_div = DEFAULT_CLOCK_DIVISOR;
-		goto issue;
-	}
-	if (of_property_read_u32(np1, "clock-frequency", &host_clock)) {
-		netdev_warn(lp->ndev, "clock-frequency property not found.\n");
-		netdev_warn(lp->ndev,
-			    "Setting MDIO clock divisor to default %d\n",
-			    DEFAULT_CLOCK_DIVISOR);
-		clk_div = DEFAULT_CLOCK_DIVISOR;
-		of_node_put(np1);
-		goto issue;
-	}
-
 	clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
 	/* If there is any remainder from the division of
 	 * fHOST / (MAX_MDIO_FREQ * 2), then we need to add
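To make the divisor rule concrete, here are two worked cases against MAX_MDIO_FREQ = 2500000; the host-clock figures are illustrative:

	/* fHOST = 150 MHz divides evenly:
	 *   clk_div = 150000000 / (2500000 * 2) - 1 = 29
	 *   fMDIO   = 150000000 / ((1 + 29) * 2)    = 2.5 MHz, exactly the max
	 *
	 * fHOST = 124 MHz leaves a remainder, so one is added:
	 *   124000000 / 5000000 = 24 (integer division), clk_div = 23 + 1 = 24
	 *   fMDIO = 124000000 / ((1 + 24) * 2) = 2.48 MHz <= 2.5 MHz,
	 * whereas clk_div = 23 would give 2.58 MHz, over the limit.
	 */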
@@ -186,12 +188,39 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
 			    "Setting MDIO clock divisor to %u/%u Hz host clock.\n",
 			    clk_div, host_clock);
 
-	of_node_put(np1);
-issue:
-	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
-		    (((u32) clk_div) | XAE_MDIO_MC_MDIOEN_MASK));
+	axienet_iow(lp, XAE_MDIO_MC_OFFSET, clk_div | XAE_MDIO_MC_MDIOEN_MASK);
 
-	ret = axienet_mdio_wait_until_ready(lp);
+	return axienet_mdio_wait_until_ready(lp);
+}
+
+/**
+ * axienet_mdio_disable - MDIO hardware disable function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Disable the MDIO interface in hardware.
+ **/
+void axienet_mdio_disable(struct axienet_local *lp)
+{
+	axienet_iow(lp, XAE_MDIO_MC_OFFSET, 0);
+}
+
+/**
+ * axienet_mdio_setup - MDIO setup function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
+ * mdiobus_alloc (to allocate memory for mii bus structure) fails.
+ *
+ * Sets up the MDIO interface by initializing the MDIO clock and enabling the
+ * MDIO interface in hardware. Register the MDIO interface.
+ **/
+int axienet_mdio_setup(struct axienet_local *lp)
+{
+	struct device_node *mdio_node;
+	struct mii_bus *bus;
+	int ret;
+
+	ret = axienet_mdio_enable(lp);
 	if (ret < 0)
 		return ret;
 
@@ -199,10 +228,8 @@ issue:
 	if (!bus)
 		return -ENOMEM;
 
-	np1 = of_get_parent(lp->phy_node);
-	of_address_to_resource(np1, 0, &res);
-	snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
-		 (unsigned long long) res.start);
+	snprintf(bus->id, MII_BUS_ID_SIZE, "axienet-%.8llx",
+		 (unsigned long long)lp->regs_start);
 
 	bus->priv = lp;
 	bus->name = "Xilinx Axi Ethernet MDIO";
@@ -211,7 +238,9 @@ issue:
 	bus->parent = lp->dev;
 	lp->mii_bus = bus;
 
-	ret = of_mdiobus_register(bus, np1);
+	mdio_node = of_get_child_by_name(lp->dev->of_node, "mdio");
+	ret = of_mdiobus_register(bus, mdio_node);
+	of_node_put(mdio_node);
 	if (ret) {
 		mdiobus_free(bus);
 		lp->mii_bus = NULL;
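The remove path calls axienet_mdio_teardown(), whose body falls outside this diff; presumably it mirrors the registration above. A sketch for symmetry, not the actual function:

	void axienet_mdio_teardown(struct axienet_local *lp)
	{
		if (!lp->mii_bus)
			return;

		mdiobus_unregister(lp->mii_bus);
		mdiobus_free(lp->mii_bus);
		lp->mii_bus = NULL;
	}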