Diffstat (limited to 'drivers/net')
-rw-r--r--   drivers/net/ethernet/xilinx/Kconfig             5
-rw-r--r--   drivers/net/ethernet/xilinx/ll_temac.h         26
-rw-r--r--   drivers/net/ethernet/xilinx/ll_temac_main.c   519
-rw-r--r--   drivers/net/ethernet/xilinx/ll_temac_mdio.c    53
4 files changed, 400 insertions(+), 203 deletions(-)
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index da4ec575ccf9..db448fad621b 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -5,7 +5,7 @@
 config NET_VENDOR_XILINX
 	bool "Xilinx devices"
 	default y
-	depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
+	depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || COMPILE_TEST
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
 
@@ -33,8 +33,7 @@ config XILINX_AXI_EMAC
 
 config XILINX_LL_TEMAC
 	tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
-	depends on (PPC || MICROBLAZE)
-	depends on !64BIT || BROKEN
+	depends on PPC || MICROBLAZE || X86 || COMPILE_TEST
 	select PHYLIB
 	---help---
 	  This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 107575225383..1aeda084b8f1 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -334,6 +334,9 @@ struct temac_local {
 
 	/* Connection to PHY device */
 	struct device_node *phy_node;
+	/* For non-device-tree devices */
+	char phy_name[MII_BUS_ID_SIZE + 3];
+	phy_interface_t phy_interface;
 
 	/* MDIO bus data */
 	struct mii_bus *mii_bus;	/* MII bus reference */
@@ -344,8 +347,10 @@ struct temac_local {
 #ifdef CONFIG_PPC_DCR
 	dcr_host_t sdma_dcrs;
 #endif
-	u32 (*dma_in)(struct temac_local *, int);
-	void (*dma_out)(struct temac_local *, int, u32);
+	u32 (*temac_ior)(struct temac_local *lp, int offset);
+	void (*temac_iow)(struct temac_local *lp, int offset, u32 value);
+	u32 (*dma_in)(struct temac_local *lp, int reg);
+	void (*dma_out)(struct temac_local *lp, int reg, u32 value);
 
 	int tx_irq;
 	int rx_irq;
@@ -353,7 +358,10 @@ struct temac_local {
 
 	struct sk_buff **rx_skb;
 	spinlock_t rx_lock;
-	struct mutex indirect_mutex;
+	/* For synchronization of indirect register access. Must be
+	 * shared mutex between interfaces in same TEMAC block.
+	 */
+	struct mutex *indirect_mutex;
 	u32 options;			/* Current options word */
 	int last_link;
 	unsigned int temac_features;
@@ -367,18 +375,24 @@ struct temac_local {
 	int tx_bd_next;
 	int tx_bd_tail;
 	int rx_bd_ci;
+
+	/* DMA channel control setup */
+	u32 tx_chnl_ctrl;
+	u32 rx_chnl_ctrl;
 };
 
+/* Wrappers for temac_ior()/temac_iow() function pointers above */
+#define temac_ior(lp, o) ((lp)->temac_ior(lp, o))
+#define temac_iow(lp, o, v) ((lp)->temac_iow(lp, o, v))
+
 /* xilinx_temac.c */
-u32 temac_ior(struct temac_local *lp, int offset);
-void temac_iow(struct temac_local *lp, int offset, u32 value);
 int temac_indirect_busywait(struct temac_local *lp);
 u32 temac_indirect_in32(struct temac_local *lp, int reg);
 void temac_indirect_out32(struct temac_local *lp, int reg, u32 value);
 
 
 /* xilinx_temac_mdio.c */
-int temac_mdio_setup(struct temac_local *lp, struct device_node *np);
+int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev);
 void temac_mdio_teardown(struct temac_local *lp);
 
 #endif /* XILINX_LL_TEMAC_H */
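For reference, the non-device-tree path added by this patch expects board code to hand in a struct ll_temac_platform_data plus two memory resources (TEMAC registers, LocalLink DMA registers) and two interrupts (DMA RX, DMA TX). The sketch below is illustrative only and is not part of the patch: the addresses, IRQ numbers and PHY address are placeholders, the field names are the ones referenced elsewhere in this diff, and the authoritative layout lives in include/linux/platform_data/xilinx-ll-temac.h.

	/* Hypothetical board file sketch -- all numbers are placeholders. */
	#include <linux/init.h>
	#include <linux/platform_device.h>
	#include <linux/phy.h>
	#include <linux/platform_data/xilinx-ll-temac.h>

	/* Must be shared between both interfaces of a dual TEMAC block. */
	static DEFINE_MUTEX(board_temac_indirect_mutex);

	static struct resource board_temac_resources[] = {
		DEFINE_RES_MEM(0x80000000, 0x40),	/* TEMAC registers */
		DEFINE_RES_MEM(0x81000000, 0x40),	/* LocalLink DMA registers */
		DEFINE_RES_IRQ(32),			/* DMA RX irq */
		DEFINE_RES_IRQ(33),			/* DMA TX irq */
	};

	static struct ll_temac_platform_data board_temac_pdata = {
		.txcsum		= true,
		.rxcsum		= true,
		.phy_addr	= 1,			/* assumed PHY address */
		.phy_interface	= PHY_INTERFACE_MODE_GMII,
		.indirect_mutex	= &board_temac_indirect_mutex,
	};

	static int __init board_temac_init(void)
	{
		struct platform_device *pdev;

		/* "xilinx_temac" matches the platform driver name below */
		pdev = platform_device_register_resndata(NULL, "xilinx_temac", 0,
				board_temac_resources,
				ARRAY_SIZE(board_temac_resources),
				&board_temac_pdata, sizeof(board_temac_pdata));
		return PTR_ERR_OR_ZERO(pdev);
	}
	device_initcall(board_temac_init);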
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 44efffbe7970..1003ee14c833 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -33,6 +33,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
+#include <linux/if_ether.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
@@ -51,6 +52,7 @@
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
+#include <linux/platform_data/xilinx-ll-temac.h>
 
 #include "ll_temac.h"
 
@@ -61,14 +63,24 @@
  * Low level register access functions
  */
 
-u32 temac_ior(struct temac_local *lp, int offset)
+u32 _temac_ior_be(struct temac_local *lp, int offset)
 {
-	return in_be32(lp->regs + offset);
+	return ioread32be(lp->regs + offset);
 }
 
-void temac_iow(struct temac_local *lp, int offset, u32 value)
+void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
 {
-	out_be32(lp->regs + offset, value);
+	return iowrite32be(value, lp->regs + offset);
+}
+
+u32 _temac_ior_le(struct temac_local *lp, int offset)
+{
+	return ioread32(lp->regs + offset);
+}
+
+void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
+{
+	return iowrite32(value, lp->regs + offset);
 }
 
 int temac_indirect_busywait(struct temac_local *lp)
@@ -80,7 +92,7 @@ int temac_indirect_busywait(struct temac_local *lp)
 			WARN_ON(1);
 			return -ETIMEDOUT;
 		}
-		msleep(1);
+		usleep_range(500, 1000);
 	}
 	return 0;
 }
@@ -119,23 +131,35 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
 }
 
 /**
- * temac_dma_in32 - Memory mapped DMA read, this function expects a
- * register input that is based on DCR word addresses which
- * are then converted to memory mapped byte addresses
+ * temac_dma_in32_* - Memory mapped DMA read, these function expects a
+ * register input that is based on DCR word addresses which are then
+ * converted to memory mapped byte addresses. To be assigned to
+ * lp->dma_in32.
  */
-static u32 temac_dma_in32(struct temac_local *lp, int reg)
+static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
 {
-	return in_be32(lp->sdma_regs + (reg << 2));
+	return ioread32be(lp->sdma_regs + (reg << 2));
+}
+
+static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
+{
+	return ioread32(lp->sdma_regs + (reg << 2));
 }
 
 /**
- * temac_dma_out32 - Memory mapped DMA read, this function expects a
- * register input that is based on DCR word addresses which
- * are then converted to memory mapped byte addresses
+ * temac_dma_out32_* - Memory mapped DMA read, these function expects
+ * a register input that is based on DCR word addresses which are then
+ * converted to memory mapped byte addresses. To be assigned to
+ * lp->dma_out32.
  */
-static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
+static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
+{
+	iowrite32be(value, lp->sdma_regs + (reg << 2));
+}
+
+static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
 {
-	out_be32(lp->sdma_regs + (reg << 2), value);
+	iowrite32(value, lp->sdma_regs + (reg << 2));
 }
 
 /* DMA register access functions can be DCR based or memory mapped.
@@ -187,7 +211,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
 
 /*
  * temac_dcr_setup - This is a stub for when DCR is not supported,
- * such as with MicroBlaze
+ * such as with MicroBlaze and x86
  */
 static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
 			   struct device_node *np)
@@ -225,7 +249,6 @@ static void temac_dma_bd_release(struct net_device *ndev)
 		dma_free_coherent(ndev->dev.parent,
 				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 				  lp->tx_bd_v, lp->tx_bd_p);
-	kfree(lp->rx_skb);
 }
 
 /**
@@ -235,9 +258,11 @@ static int temac_dma_bd_init(struct net_device *ndev)
 {
 	struct temac_local *lp = netdev_priv(ndev);
 	struct sk_buff *skb;
+	dma_addr_t skb_dma_addr;
 	int i;
 
-	lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
+	lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
+				  GFP_KERNEL);
 	if (!lp->rx_skb)
 		goto out;
 
@@ -256,13 +281,13 @@ static int temac_dma_bd_init(struct net_device *ndev)
 		goto out;
 
 	for (i = 0; i < TX_BD_NUM; i++) {
-		lp->tx_bd_v[i].next = lp->tx_bd_p +
-				sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
+		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
+				+ sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
 	}
 
 	for (i = 0; i < RX_BD_NUM; i++) {
-		lp->rx_bd_v[i].next = lp->rx_bd_p +
-				sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
+		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
+				+ sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));
 
 		skb = netdev_alloc_skb_ip_align(ndev,
 						XTE_MAX_JUMBO_FRAME_SIZE);
@@ -271,31 +296,23 @@ static int temac_dma_bd_init(struct net_device *ndev)
 
 		lp->rx_skb[i] = skb;
 		/* returns physical address of skb->data */
-		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
-						     skb->data,
-						     XTE_MAX_JUMBO_FRAME_SIZE,
-						     DMA_FROM_DEVICE);
-		lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
-		lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
+		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+					      XTE_MAX_JUMBO_FRAME_SIZE,
+					      DMA_FROM_DEVICE);
+		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
+		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
+		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
 	}
 
-	lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
-				      CHNL_CTRL_IRQ_EN |
-				      CHNL_CTRL_IRQ_DLY_EN |
-				      CHNL_CTRL_IRQ_COAL_EN);
-	/* 0x10220483 */
-	/* 0x00100483 */
-	lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
-				      CHNL_CTRL_IRQ_EN |
-				      CHNL_CTRL_IRQ_DLY_EN |
-				      CHNL_CTRL_IRQ_COAL_EN |
-				      CHNL_CTRL_IRQ_IOE);
-	/* 0xff010283 */
-
-	lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
-	lp->dma_out(lp, RX_TAILDESC_PTR,
-		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
-	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+	/* Configure DMA channel (irq setup) */
+	lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
+		    0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
+		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
+		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
+	lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
+		    CHNL_CTRL_IRQ_IOE |
+		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
+		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
 
 	/* Init descriptor indexes */
 	lp->tx_bd_ci = 0;
@@ -303,6 +320,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
 	lp->tx_bd_tail = 0;
 	lp->rx_bd_ci = 0;
 
+	/* Enable RX DMA transfers */
+	wmb();
+	lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
+	lp->dma_out(lp, RX_TAILDESC_PTR,
+		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+
+	/* Prepare for TX DMA transfer */
+	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+
 	return 0;
 
 out:
@@ -319,7 +345,7 @@ static void temac_do_set_mac_address(struct net_device *ndev)
 	struct temac_local *lp = netdev_priv(ndev);
 
 	/* set up unicast MAC address filter set its mac address */
-	mutex_lock(&lp->indirect_mutex);
+	mutex_lock(lp->indirect_mutex);
 	temac_indirect_out32(lp, XTE_UAW0_OFFSET,
 			     (ndev->dev_addr[0]) |
 			     (ndev->dev_addr[1] << 8) |
@@ -330,7 +356,7 @@ static void temac_do_set_mac_address(struct net_device *ndev)
 	temac_indirect_out32(lp, XTE_UAW1_OFFSET,
 			     (ndev->dev_addr[4] & 0x000000ff) |
 			     (ndev->dev_addr[5] << 8));
-	mutex_unlock(&lp->indirect_mutex);
+	mutex_unlock(lp->indirect_mutex);
 }
 
 static int temac_init_mac_address(struct net_device *ndev, const void *address)
@@ -359,7 +385,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
 	u32 multi_addr_msw, multi_addr_lsw, val;
 	int i;
 
-	mutex_lock(&lp->indirect_mutex);
+	mutex_lock(lp->indirect_mutex);
 	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
 	    netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
 		/*
@@ -398,7 +424,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
 		temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
 		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
 	}
-	mutex_unlock(&lp->indirect_mutex);
+	mutex_unlock(lp->indirect_mutex);
 }
 
 static struct temac_option {
@@ -490,7 +516,7 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options)
 	struct temac_option *tp = &temac_options[0];
 	int reg;
 
-	mutex_lock(&lp->indirect_mutex);
+	mutex_lock(lp->indirect_mutex);
 	while (tp->opt) {
 		reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
 		if (options & tp->opt)
@@ -499,7 +525,7 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options)
 		tp++;
 	}
 	lp->options |= options;
-	mutex_unlock(&lp->indirect_mutex);
+	mutex_unlock(lp->indirect_mutex);
 
 	return 0;
 }
@@ -518,7 +544,7 @@ static void temac_device_reset(struct net_device *ndev)
 
 	dev_dbg(&ndev->dev, "%s()\n", __func__);
 
-	mutex_lock(&lp->indirect_mutex);
+	mutex_lock(lp->indirect_mutex);
 	/* Reset the receiver and wait for it to finish reset */
 	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
 	timeout = 1000;
@@ -570,7 +596,7 @@ static void temac_device_reset(struct net_device *ndev)
 	temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
 	temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
 
-	mutex_unlock(&lp->indirect_mutex);
+	mutex_unlock(lp->indirect_mutex);
 
 	/* Sync default options with HW
 	 * but leave receiver and transmitter disabled. */
@@ -598,7 +624,7 @@ static void temac_adjust_link(struct net_device *ndev)
 	/* hash together the state values to decide if something has changed */
 	link_state = phy->speed | (phy->duplex << 1) | phy->link;
 
-	mutex_lock(&lp->indirect_mutex);
+	mutex_lock(lp->indirect_mutex);
 	if (lp->last_link != link_state) {
 		mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
 		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
@@ -614,23 +640,52 @@ static void temac_adjust_link(struct net_device *ndev)
 		lp->last_link = link_state;
 		phy_print_status(phy);
 	}
-	mutex_unlock(&lp->indirect_mutex);
+	mutex_unlock(lp->indirect_mutex);
+}
+
+#ifdef CONFIG_64BIT
+
+void ptr_to_txbd(void *p, struct cdmac_bd *bd)
+{
+	bd->app3 = (u32)(((u64)p) >> 32);
+	bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
+}
+
+void *ptr_from_txbd(struct cdmac_bd *bd)
+{
+	return (void *)(((u64)(bd->app3) << 32) | bd->app4);
 }
 
+#else
+
+void ptr_to_txbd(void *p, struct cdmac_bd *bd)
+{
+	bd->app4 = (u32)p;
+}
+
+void *ptr_from_txbd(struct cdmac_bd *bd)
+{
+	return (void *)(bd->app4);
+}
+
+#endif
+
 static void temac_start_xmit_done(struct net_device *ndev)
 {
 	struct temac_local *lp = netdev_priv(ndev);
 	struct cdmac_bd *cur_p;
 	unsigned int stat = 0;
+	struct sk_buff *skb;
 
 	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
-	stat = cur_p->app0;
+	stat = be32_to_cpu(cur_p->app0);
 
 	while (stat & STS_CTRL_APP0_CMPLT) {
-		dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
-				 DMA_TO_DEVICE);
-		if (cur_p->app4)
-			dev_consume_skb_irq((struct sk_buff *)cur_p->app4);
+		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
+				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
+		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
+		if (skb)
+			dev_consume_skb_irq(skb);
 		cur_p->app0 = 0;
 		cur_p->app1 = 0;
 		cur_p->app2 = 0;
@@ -638,14 +693,14 @@ static void temac_start_xmit_done(struct net_device *ndev)
 		cur_p->app4 = 0;
 
 		ndev->stats.tx_packets++;
-		ndev->stats.tx_bytes += cur_p->len;
+		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
 
 		lp->tx_bd_ci++;
 		if (lp->tx_bd_ci >= TX_BD_NUM)
 			lp->tx_bd_ci = 0;
 
 		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
-		stat = cur_p->app0;
+		stat = be32_to_cpu(cur_p->app0);
 	}
 
 	netif_wake_queue(ndev);
@@ -679,7 +734,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct temac_local *lp = netdev_priv(ndev);
 	struct cdmac_bd *cur_p;
-	dma_addr_t start_p, tail_p;
+	dma_addr_t start_p, tail_p, skb_dma_addr;
 	int ii;
 	unsigned long num_frag;
 	skb_frag_t *frag;
@@ -689,7 +744,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 
-	if (temac_check_tx_bd_space(lp, num_frag)) {
+	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
 		if (!netif_queue_stopped(ndev))
 			netif_stop_queue(ndev);
 		return NETDEV_TX_BUSY;
@@ -700,16 +755,18 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		unsigned int csum_start_off = skb_checksum_start_offset(skb);
 		unsigned int csum_index_off = csum_start_off + skb->csum_offset;
 
-		cur_p->app0 |= 1; /* TX Checksum Enabled */
-		cur_p->app1 = (csum_start_off << 16) | csum_index_off;
+		cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
+		cur_p->app1 = cpu_to_be32((csum_start_off << 16)
+					  | csum_index_off);
 		cur_p->app2 = 0;  /* initial checksum seed */
 	}
 
-	cur_p->app0 |= STS_CTRL_APP0_SOP;
-	cur_p->len = skb_headlen(skb);
-	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
-				     skb_headlen(skb), DMA_TO_DEVICE);
-	cur_p->app4 = (unsigned long)skb;
+	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
+	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+				      skb_headlen(skb), DMA_TO_DEVICE);
+	cur_p->len = cpu_to_be32(skb_headlen(skb));
+	cur_p->phys = cpu_to_be32(skb_dma_addr);
+	ptr_to_txbd((void *)skb, cur_p);
 
 	for (ii = 0; ii < num_frag; ii++) {
 		lp->tx_bd_tail++;
@@ -717,14 +774,16 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			lp->tx_bd_tail = 0;
 
 		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
-		cur_p->phys = dma_map_single(ndev->dev.parent,
-					     skb_frag_address(frag),
-					     skb_frag_size(frag), DMA_TO_DEVICE);
-		cur_p->len = skb_frag_size(frag);
+		skb_dma_addr = dma_map_single(ndev->dev.parent,
+					      skb_frag_address(frag),
+					      skb_frag_size(frag),
+					      DMA_TO_DEVICE);
+		cur_p->phys = cpu_to_be32(skb_dma_addr);
+		cur_p->len = cpu_to_be32(skb_frag_size(frag));
 		cur_p->app0 = 0;
 		frag++;
 	}
-	cur_p->app0 |= STS_CTRL_APP0_EOP;
+	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
 
 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 	lp->tx_bd_tail++;
@@ -734,6 +793,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	skb_tx_timestamp(skb);
 
 	/* Kick off the transfer */
+	wmb();
 	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
 
 	return NETDEV_TX_OK;
@@ -746,7 +806,7 @@ static void ll_temac_recv(struct net_device *ndev)
 	struct sk_buff *skb, *new_skb;
 	unsigned int bdstat;
 	struct cdmac_bd *cur_p;
-	dma_addr_t tail_p;
+	dma_addr_t tail_p, skb_dma_addr;
 	int length;
 	unsigned long flags;
 
@@ -755,14 +815,14 @@ static void ll_temac_recv(struct net_device *ndev)
 	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
 	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 
-	bdstat = cur_p->app0;
+	bdstat = be32_to_cpu(cur_p->app0);
 	while ((bdstat & STS_CTRL_APP0_CMPLT)) {
 
 		skb = lp->rx_skb[lp->rx_bd_ci];
-		length = cur_p->app4 & 0x3FFF;
+		length = be32_to_cpu(cur_p->app4) & 0x3FFF;
 
-		dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
-				 DMA_FROM_DEVICE);
+		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
+				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
 
 		skb_put(skb, length);
 		skb->protocol = eth_type_trans(skb, ndev);
@@ -773,7 +833,12 @@ static void ll_temac_recv(struct net_device *ndev)
 		    (skb->protocol == htons(ETH_P_IP)) &&
 		    (skb->len > 64)) {
 
-			skb->csum = cur_p->app3 & 0xFFFF;
+			/* Convert from device endianness (be32) to cpu
+			 * endianness, and if necessary swap the bytes
+			 * (back) for proper IP checksum byte order
+			 * (be16).
+			 */
+			skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF);
 			skb->ip_summed = CHECKSUM_COMPLETE;
 		}
 
@@ -790,11 +855,12 @@ static void ll_temac_recv(struct net_device *ndev)
 			return;
 		}
 
-		cur_p->app0 = STS_CTRL_APP0_IRQONEND;
-		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
-					     XTE_MAX_JUMBO_FRAME_SIZE,
-					     DMA_FROM_DEVICE);
-		cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
+		cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
+		skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data,
+					      XTE_MAX_JUMBO_FRAME_SIZE,
+					      DMA_FROM_DEVICE);
+		cur_p->phys = cpu_to_be32(skb_dma_addr);
+		cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
 		lp->rx_skb[lp->rx_bd_ci] = new_skb;
 
 		lp->rx_bd_ci++;
@@ -802,7 +868,7 @@ static void ll_temac_recv(struct net_device *ndev)
 			lp->rx_bd_ci = 0;
 
 		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
-		bdstat = cur_p->app0;
+		bdstat = be32_to_cpu(cur_p->app0);
 	}
 	lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
 
@@ -857,7 +923,14 @@ static int temac_open(struct net_device *ndev)
 			dev_err(lp->dev, "of_phy_connect() failed\n");
 			return -ENODEV;
 		}
-
+		phy_start(phydev);
+	} else if (strlen(lp->phy_name) > 0) {
+		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
+				     lp->phy_interface);
+		if (!phydev) {
+			dev_err(lp->dev, "phy_connect() failed\n");
+			return -ENODEV;
+		}
 		phy_start(phydev);
 	}
 
@@ -977,22 +1050,25 @@ static const struct ethtool_ops temac_ethtool_ops = {
 	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
-static int temac_of_probe(struct platform_device *op)
+static int temac_probe(struct platform_device *pdev)
 {
-	struct device_node *np;
+	struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
 	struct temac_local *lp;
 	struct net_device *ndev;
+	struct resource *res;
 	const void *addr;
 	__be32 *p;
+	bool little_endian;
 	int rc = 0;
 
 	/* Init network device structure */
-	ndev = alloc_etherdev(sizeof(*lp));
+	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
 	if (!ndev)
 		return -ENOMEM;
 
-	platform_set_drvdata(op, ndev);
-	SET_NETDEV_DEV(ndev, &op->dev);
+	platform_set_drvdata(pdev, ndev);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
 	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
 	ndev->features = NETIF_F_SG;
 	ndev->netdev_ops = &temac_netdev_ops;
@@ -1014,89 +1090,196 @@ static int temac_of_probe(struct platform_device *op)
 	/* setup temac private info structure */
 	lp = netdev_priv(ndev);
 	lp->ndev = ndev;
-	lp->dev = &op->dev;
+	lp->dev = &pdev->dev;
 	lp->options = XTE_OPTION_DEFAULTS;
 	spin_lock_init(&lp->rx_lock);
-	mutex_init(&lp->indirect_mutex);
+
+	/* Setup mutex for synchronization of indirect register access */
+	if (pdata) {
+		if (!pdata->indirect_mutex) {
+			dev_err(&pdev->dev,
+				"indirect_mutex missing in platform_data\n");
+			return -EINVAL;
+		}
+		lp->indirect_mutex = pdata->indirect_mutex;
+	} else {
+		lp->indirect_mutex = devm_kmalloc(&pdev->dev,
+						  sizeof(*lp->indirect_mutex),
+						  GFP_KERNEL);
+		mutex_init(lp->indirect_mutex);
+	}
 
 	/* map device registers */
-	lp->regs = of_iomap(op->dev.of_node, 0);
-	if (!lp->regs) {
-		dev_err(&op->dev, "could not map temac regs.\n");
-		rc = -ENOMEM;
-		goto nodev;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	lp->regs = devm_ioremap_nocache(&pdev->dev, res->start,
+					resource_size(res));
+	if (IS_ERR(lp->regs)) {
+		dev_err(&pdev->dev, "could not map TEMAC registers\n");
+		return PTR_ERR(lp->regs);
+	}
+
+	/* Select register access functions with the specified
+	 * endianness mode. Default for OF devices is big-endian.
+	 */
+	little_endian = false;
+	if (temac_np) {
+		if (of_get_property(temac_np, "little-endian", NULL))
+			little_endian = true;
+	} else if (pdata) {
+		little_endian = pdata->reg_little_endian;
+	}
+	if (little_endian) {
+		lp->temac_ior = _temac_ior_le;
+		lp->temac_iow = _temac_iow_le;
+	} else {
+		lp->temac_ior = _temac_ior_be;
+		lp->temac_iow = _temac_iow_be;
 	}
 
 	/* Setup checksum offload, but default to off if not specified */
 	lp->temac_features = 0;
-	p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
-	if (p && be32_to_cpu(*p)) {
-		lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
+	if (temac_np) {
+		p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
+		if (p && be32_to_cpu(*p))
+			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
+		p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
+		if (p && be32_to_cpu(*p))
+			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
+	} else if (pdata) {
+		if (pdata->txcsum)
+			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
+		if (pdata->rxcsum)
+			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
+	}
+	if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
 		/* Can checksum TCP/UDP over IPv4. */
 		ndev->features |= NETIF_F_IP_CSUM;
-	}
-	p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
-	if (p && be32_to_cpu(*p))
-		lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
-
-	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
-	np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
-	if (!np) {
-		dev_err(&op->dev, "could not find DMA node\n");
-		rc = -ENODEV;
-		goto err_iounmap;
-	}
 
-	/* Setup the DMA register accesses, could be DCR or memory mapped */
-	if (temac_dcr_setup(lp, op, np)) {
+	/* Setup LocalLink DMA */
+	if (temac_np) {
+		/* Find the DMA node, map the DMA registers, and
+		 * decode the DMA IRQs.
+		 */
+		dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
+		if (!dma_np) {
+			dev_err(&pdev->dev, "could not find DMA node\n");
+			return -ENODEV;
+		}
 
-		/* no DCR in the device tree, try non-DCR */
-		lp->sdma_regs = of_iomap(np, 0);
-		if (lp->sdma_regs) {
-			lp->dma_in = temac_dma_in32;
-			lp->dma_out = temac_dma_out32;
-			dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
-		} else {
-			dev_err(&op->dev, "unable to map DMA registers\n");
-			of_node_put(np);
-			goto err_iounmap;
+		/* Setup the DMA register accesses, could be DCR or
+		 * memory mapped.
+		 */
+		if (temac_dcr_setup(lp, pdev, dma_np)) {
+			/* no DCR in the device tree, try non-DCR */
+			lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
+						      NULL);
+			if (IS_ERR(lp->sdma_regs)) {
+				dev_err(&pdev->dev,
+					"unable to map DMA registers\n");
+				of_node_put(dma_np);
+				return PTR_ERR(lp->sdma_regs);
+			}
+			if (of_get_property(dma_np, "little-endian", NULL)) {
+				lp->dma_in = temac_dma_in32_le;
+				lp->dma_out = temac_dma_out32_le;
+			} else {
+				lp->dma_in = temac_dma_in32_be;
+				lp->dma_out = temac_dma_out32_be;
+			}
+			dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
 		}
-	}
 
-	lp->rx_irq = irq_of_parse_and_map(np, 0);
-	lp->tx_irq = irq_of_parse_and_map(np, 1);
+		/* Get DMA RX and TX interrupts */
+		lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
+		lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
 
-	of_node_put(np); /* Finished with the DMA node; drop the reference */
+		/* Use defaults for IRQ delay/coalescing setup. These
+		 * are configuration values, so does not belong in
+		 * device-tree.
+		 */
+		lp->tx_chnl_ctrl = 0x10220000;
+		lp->rx_chnl_ctrl = 0xff070000;
+
+		/* Finished with the DMA node; drop the reference */
+		of_node_put(dma_np);
+	} else if (pdata) {
+		/* 2nd memory resource specifies DMA registers */
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start,
+						     resource_size(res));
+		if (IS_ERR(lp->sdma_regs)) {
+			dev_err(&pdev->dev,
+				"could not map DMA registers\n");
+			return PTR_ERR(lp->sdma_regs);
+		}
+		if (pdata->dma_little_endian) {
+			lp->dma_in = temac_dma_in32_le;
+			lp->dma_out = temac_dma_out32_le;
+		} else {
+			lp->dma_in = temac_dma_in32_be;
+			lp->dma_out = temac_dma_out32_be;
+		}
 
-	if (!lp->rx_irq || !lp->tx_irq) {
-		dev_err(&op->dev, "could not determine irqs\n");
-		rc = -ENOMEM;
-		goto err_iounmap_2;
+		/* Get DMA RX and TX interrupts */
+		lp->rx_irq = platform_get_irq(pdev, 0);
+		lp->tx_irq = platform_get_irq(pdev, 1);
+
+		/* IRQ delay/coalescing setup */
+		if (pdata->tx_irq_timeout || pdata->tx_irq_count)
+			lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
+				(pdata->tx_irq_count << 16);
+		else
+			lp->tx_chnl_ctrl = 0x10220000;
+		if (pdata->rx_irq_timeout || pdata->rx_irq_count)
+			lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
+				(pdata->rx_irq_count << 16);
+		else
+			lp->rx_chnl_ctrl = 0xff070000;
 	}
 
-	/* Retrieve the MAC address */
-	addr = of_get_mac_address(op->dev.of_node);
-	if (!addr) {
-		dev_err(&op->dev, "could not find MAC address\n");
-		rc = -ENODEV;
-		goto err_iounmap_2;
+	/* Error handle returned DMA RX and TX interrupts */
+	if (lp->rx_irq < 0) {
+		if (lp->rx_irq != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "could not get DMA RX irq\n");
+		return lp->rx_irq;
+	}
+	if (lp->tx_irq < 0) {
+		if (lp->tx_irq != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "could not get DMA TX irq\n");
+		return lp->tx_irq;
+	}
+
+	if (temac_np) {
+		/* Retrieve the MAC address */
+		addr = of_get_mac_address(temac_np);
+		if (!addr) {
+			dev_err(&pdev->dev, "could not find MAC address\n");
+			return -ENODEV;
+		}
+		temac_init_mac_address(ndev, addr);
+	} else if (pdata) {
+		temac_init_mac_address(ndev, pdata->mac_addr);
 	}
-	temac_init_mac_address(ndev, addr);
 
-	rc = temac_mdio_setup(lp, op->dev.of_node);
+	rc = temac_mdio_setup(lp, pdev);
 	if (rc)
-		dev_warn(&op->dev, "error registering MDIO bus\n");
+		dev_warn(&pdev->dev, "error registering MDIO bus\n");
 
-	lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
-	if (lp->phy_node)
-		dev_dbg(lp->dev, "using PHY node %pOF (%p)\n", np, np);
+	if (temac_np) {
+		lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
+		if (lp->phy_node)
+			dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np);
+	} else if (pdata) {
+		snprintf(lp->phy_name, sizeof(lp->phy_name),
+			 PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
+		lp->phy_interface = pdata->phy_interface;
+	}
 
 	/* Add the device attributes */
 	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
 	if (rc) {
 		dev_err(lp->dev, "Error creating sysfs files\n");
-		goto err_iounmap_2;
+		goto err_sysfs_create;
 	}
 
 	rc = register_netdev(lp->ndev);
@@ -1107,33 +1290,25 @@ static int temac_of_probe(struct platform_device *op)
 
 	return 0;
 
- err_register_ndev:
+err_register_ndev:
 	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
- err_iounmap_2:
-	if (lp->sdma_regs)
-		iounmap(lp->sdma_regs);
- err_iounmap:
-	iounmap(lp->regs);
- nodev:
-	free_netdev(ndev);
-	ndev = NULL;
+err_sysfs_create:
+	if (lp->phy_node)
+		of_node_put(lp->phy_node);
+	temac_mdio_teardown(lp);
 	return rc;
 }
 
-static int temac_of_remove(struct platform_device *op)
+static int temac_remove(struct platform_device *pdev)
 {
-	struct net_device *ndev = platform_get_drvdata(op);
+	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct temac_local *lp = netdev_priv(ndev);
 
-	temac_mdio_teardown(lp);
 	unregister_netdev(ndev);
 	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
-	of_node_put(lp->phy_node);
-	lp->phy_node = NULL;
-	iounmap(lp->regs);
-	if (lp->sdma_regs)
-		iounmap(lp->sdma_regs);
-	free_netdev(ndev);
+	if (lp->phy_node)
+		of_node_put(lp->phy_node);
+	temac_mdio_teardown(lp);
 	return 0;
 }
 
@@ -1146,16 +1321,16 @@ static const struct of_device_id temac_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, temac_of_match);
 
-static struct platform_driver temac_of_driver = {
-	.probe = temac_of_probe,
-	.remove = temac_of_remove,
+static struct platform_driver temac_driver = {
+	.probe = temac_probe,
+	.remove = temac_remove,
 	.driver = {
 		.name = "xilinx_temac",
 		.of_match_table = temac_of_match,
 	},
 };
 
-module_platform_driver(temac_of_driver);
+module_platform_driver(temac_driver);
 
 MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
 MODULE_AUTHOR("Yoshio Kashiwagi");
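A note on the tx_chnl_ctrl/rx_chnl_ctrl defaults retained above: in the platform-data branch of temac_probe() the value is built as (irq_timeout << 24) | (irq_count << 16), so the preserved defaults decompose as

	0x10220000 = (0x10 << 24) | (0x22 << 16)	/* TX: delay timeout 0x10, coalesce count 0x22 */
	0xff070000 = (0xff << 24) | (0x07 << 16)	/* RX: delay timeout 0xff, coalesce count 0x07 */

with the interrupt enable/delay/coalesce/error bits OR'ed in later by temac_dma_bd_init(). This field interpretation is the one implied by the patch itself; the unit of the timeout byte is a property of the LocalLink DMA engine and is not spelled out here.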
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index f5e83ac6f7e2..c2a11703bc6d 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -14,6 +14,7 @@
 #include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/of_mdio.h>
+#include <linux/platform_data/xilinx-ll-temac.h>
 
 #include "ll_temac.h"
 
@@ -28,10 +29,10 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
 	/* Write the PHY address to the MIIM Access Initiator register.
 	 * When the transfer completes, the PHY register value will appear
 	 * in the LSW0 register */
-	mutex_lock(&lp->indirect_mutex);
+	mutex_lock(lp->indirect_mutex);
 	temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg);
 	rc = temac_indirect_in32(lp, XTE_MIIMAI_OFFSET);
-	mutex_unlock(&lp->indirect_mutex);
+	mutex_unlock(lp->indirect_mutex);
 
 	dev_dbg(lp->dev, "temac_mdio_read(phy_id=%i, reg=%x) == %x\n",
 		phy_id, reg, rc);
@@ -49,25 +50,34 @@ static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
 	/* First write the desired value into the write data register
 	 * and then write the address into the access initiator register
 	 */
-	mutex_lock(&lp->indirect_mutex);
+	mutex_lock(lp->indirect_mutex);
 	temac_indirect_out32(lp, XTE_MGTDR_OFFSET, val);
 	temac_indirect_out32(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg);
-	mutex_unlock(&lp->indirect_mutex);
+	mutex_unlock(lp->indirect_mutex);
 
 	return 0;
 }
 
-int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
+int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
 {
+	struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct device_node *np = dev_of_node(&pdev->dev);
 	struct mii_bus *bus;
 	u32 bus_hz;
 	int clk_div;
 	int rc;
 	struct resource res;
 
+	/* Get MDIO bus frequency (if specified) */
+	bus_hz = 0;
+	if (np)
+		of_property_read_u32(np, "clock-frequency", &bus_hz);
+	else if (pdata)
+		bus_hz = pdata->mdio_clk_freq;
+
 	/* Calculate a reasonable divisor for the clock rate */
 	clk_div = 0x3f;	/* worst-case default setting */
-	if (of_property_read_u32(np, "clock-frequency", &bus_hz) == 0) {
+	if (bus_hz != 0) {
 		clk_div = bus_hz / (2500 * 1000 * 2) - 1;
 		if (clk_div < 1)
 			clk_div = 1;
@@ -77,17 +87,23 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
 
 	/* Enable the MDIO bus by asserting the enable bit and writing
 	 * in the clock config */
-	mutex_lock(&lp->indirect_mutex);
+	mutex_lock(lp->indirect_mutex);
 	temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div);
-	mutex_unlock(&lp->indirect_mutex);
+	mutex_unlock(lp->indirect_mutex);
 
-	bus = mdiobus_alloc();
+	bus = devm_mdiobus_alloc(&pdev->dev);
 	if (!bus)
 		return -ENOMEM;
 
-	of_address_to_resource(np, 0, &res);
-	snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
-		 (unsigned long long)res.start);
+	if (np) {
+		of_address_to_resource(np, 0, &res);
+		snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+			 (unsigned long long)res.start);
+	} else if (pdata && pdata->mdio_bus_id >= 0) {
+		snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+			 pdata->mdio_bus_id);
+	}
+
 	bus->priv = lp;
 	bus->name = "Xilinx TEMAC MDIO";
 	bus->read = temac_mdio_read;
@@ -98,23 +114,16 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
 
 	rc = of_mdiobus_register(bus, np);
 	if (rc)
-		goto err_register;
+		return rc;
 
-	mutex_lock(&lp->indirect_mutex);
+	mutex_lock(lp->indirect_mutex);
 	dev_dbg(lp->dev, "MDIO bus registered; MC:%x\n",
 		temac_indirect_in32(lp, XTE_MC_OFFSET));
-	mutex_unlock(&lp->indirect_mutex);
+	mutex_unlock(lp->indirect_mutex);
 	return 0;
-
- err_register:
-	mdiobus_free(bus);
-	return rc;
 }
 
 void temac_mdio_teardown(struct temac_local *lp)
 {
 	mdiobus_unregister(lp->mii_bus);
-	mdiobus_free(lp->mii_bus);
-	lp->mii_bus = NULL;
 }
-
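Worked example of the MDIO clock divisor logic kept (and now also fed from pdata->mdio_clk_freq) in temac_mdio_setup(): assuming a 125 MHz bus clock, a figure chosen for illustration rather than taken from the patch,

	clk_div = 125000000 / (2500 * 1000 * 2) - 1 = 24

i.e. the formula solves bus_hz / (2 * (clk_div + 1)) <= 2.5 MHz, the usual MDIO limit; 125 MHz / (2 * (24 + 1)) = 2.5 MHz exactly. When neither a clock-frequency property nor pdata->mdio_clk_freq is available, bus_hz stays 0 and the worst-case divisor 0x3f is used.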