Diffstat (limited to 'drivers/net/ethernet/cadence/macb.c')
-rw-r--r--	drivers/net/ethernet/cadence/macb.c	556
1 files changed, 387 insertions, 169 deletions
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 033064b7b576..6a59bce38a8c 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -14,8 +14,10 @@
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/circ_buf.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -26,37 +28,74 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
 
 #include "macb.h"
 
 #define RX_BUFFER_SIZE		128
-#define RX_RING_SIZE		512
-#define RX_RING_BYTES		(sizeof(struct dma_desc) * RX_RING_SIZE)
+#define RX_RING_SIZE		512 /* must be power of 2 */
+#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 
-/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
-#define RX_OFFSET		2
+#define TX_RING_SIZE		128 /* must be power of 2 */
+#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 
-#define TX_RING_SIZE		128
-#define DEF_TX_RING_PENDING	(TX_RING_SIZE - 1)
-#define TX_RING_BYTES		(sizeof(struct dma_desc) * TX_RING_SIZE)
+/* level of occupied TX descriptors under which we wake up TX process */
+#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
 
-#define TX_RING_GAP(bp)						\
-	(TX_RING_SIZE - (bp)->tx_pending)
-#define TX_BUFFS_AVAIL(bp)					\
-	(((bp)->tx_tail <= (bp)->tx_head) ?			\
-	 (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head :	\
-	 (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
-#define NEXT_TX(n)		(((n) + 1) & (TX_RING_SIZE - 1))
+#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
+				 | MACB_BIT(ISR_ROVR))
+#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
+					| MACB_BIT(ISR_RLE)		\
+					| MACB_BIT(TXERR))
+#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 
-#define NEXT_RX(n)		(((n) + 1) & (RX_RING_SIZE - 1))
+/*
+ * Graceful stop timeouts in us. We should allow up to
+ * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
+ */
+#define MACB_HALT_TIMEOUT	1230
 
-/* minimum number of free TX descriptors before waking up TX process */
-#define MACB_TX_WAKEUP_THRESH	(TX_RING_SIZE / 4)
+/* Ring buffer accessors */
+static unsigned int macb_tx_ring_wrap(unsigned int index)
+{
+	return index & (TX_RING_SIZE - 1);
+}
 
-#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
-				 | MACB_BIT(ISR_ROVR))
+static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+{
+	return &bp->tx_ring[macb_tx_ring_wrap(index)];
+}
 
-static void __macb_set_hwaddr(struct macb *bp)
+static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+{
+	return &bp->tx_skb[macb_tx_ring_wrap(index)];
+}
+
+static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
+{
+	dma_addr_t offset;
+
+	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
+
+	return bp->tx_ring_dma + offset;
+}
+
+static unsigned int macb_rx_ring_wrap(unsigned int index)
+{
+	return index & (RX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+{
+	return &bp->rx_ring[macb_rx_ring_wrap(index)];
+}
+
+static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+{
+	return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index);
+}
+
+void macb_set_hwaddr(struct macb *bp)
 {
 	u32 bottom;
 	u16 top;
@@ -65,31 +104,58 @@ static void __macb_set_hwaddr(struct macb *bp)
 	macb_or_gem_writel(bp, SA1B, bottom);
 	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
 	macb_or_gem_writel(bp, SA1T, top);
+
+	/* Clear unused address register sets */
+	macb_or_gem_writel(bp, SA2B, 0);
+	macb_or_gem_writel(bp, SA2T, 0);
+	macb_or_gem_writel(bp, SA3B, 0);
+	macb_or_gem_writel(bp, SA3T, 0);
+	macb_or_gem_writel(bp, SA4B, 0);
+	macb_or_gem_writel(bp, SA4T, 0);
 }
+EXPORT_SYMBOL_GPL(macb_set_hwaddr);
 
-static void __init macb_get_hwaddr(struct macb *bp)
+void macb_get_hwaddr(struct macb *bp)
 {
+	struct macb_platform_data *pdata;
 	u32 bottom;
 	u16 top;
 	u8 addr[6];
+	int i;
 
-	bottom = macb_or_gem_readl(bp, SA1B);
-	top = macb_or_gem_readl(bp, SA1T);
+	pdata = bp->pdev->dev.platform_data;
 
-	addr[0] = bottom & 0xff;
-	addr[1] = (bottom >> 8) & 0xff;
-	addr[2] = (bottom >> 16) & 0xff;
-	addr[3] = (bottom >> 24) & 0xff;
-	addr[4] = top & 0xff;
-	addr[5] = (top >> 8) & 0xff;
+	/* Check all 4 address register for vaild address */
+	for (i = 0; i < 4; i++) {
+		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
+		top = macb_or_gem_readl(bp, SA1T + i * 8);
+
+		if (pdata && pdata->rev_eth_addr) {
+			addr[5] = bottom & 0xff;
+			addr[4] = (bottom >> 8) & 0xff;
+			addr[3] = (bottom >> 16) & 0xff;
+			addr[2] = (bottom >> 24) & 0xff;
+			addr[1] = top & 0xff;
+			addr[0] = (top & 0xff00) >> 8;
+		} else {
+			addr[0] = bottom & 0xff;
+			addr[1] = (bottom >> 8) & 0xff;
+			addr[2] = (bottom >> 16) & 0xff;
+			addr[3] = (bottom >> 24) & 0xff;
+			addr[4] = top & 0xff;
+			addr[5] = (top >> 8) & 0xff;
+		}
 
-	if (is_valid_ether_addr(addr)) {
-		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
-	} else {
-		netdev_info(bp->dev, "invalid hw address, using random\n");
-		eth_hw_addr_random(bp->dev);
+		if (is_valid_ether_addr(addr)) {
+			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+			return;
+		}
 	}
+
+	netdev_info(bp->dev, "invalid hw address, using random\n");
+	eth_hw_addr_random(bp->dev);
 }
+EXPORT_SYMBOL_GPL(macb_get_hwaddr);
 
 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
@@ -152,13 +218,17 @@ static void macb_handle_link_change(struct net_device *dev)
 
 			reg = macb_readl(bp, NCFGR);
 			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+			if (macb_is_gem(bp))
+				reg &= ~GEM_BIT(GBE);
 
 			if (phydev->duplex)
 				reg |= MACB_BIT(FD);
 			if (phydev->speed == SPEED_100)
 				reg |= MACB_BIT(SPD);
+			if (phydev->speed == SPEED_1000)
+				reg |= GEM_BIT(GBE);
 
-			macb_writel(bp, NCFGR, reg);
+			macb_or_gem_writel(bp, NCFGR, reg);
 
 			bp->speed = phydev->speed;
 			bp->duplex = phydev->duplex;
@@ -196,7 +266,9 @@ static void macb_handle_link_change(struct net_device *dev)
 static int macb_mii_probe(struct net_device *dev)
 {
 	struct macb *bp = netdev_priv(dev);
+	struct macb_platform_data *pdata;
 	struct phy_device *phydev;
+	int phy_irq;
 	int ret;
 
 	phydev = phy_find_first(bp->mii_bus);
@@ -205,7 +277,14 @@ static int macb_mii_probe(struct net_device *dev)
 		return -1;
 	}
 
-	/* TODO : add pin_irq */
+	pdata = dev_get_platdata(&bp->pdev->dev);
+	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
+		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
+		if (!ret) {
+			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+		}
+	}
 
 	/* attach the mac to the phy */
 	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
@@ -216,7 +295,10 @@ static int macb_mii_probe(struct net_device *dev)
 	}
 
 	/* mask with MAC supported features */
-	phydev->supported &= PHY_BASIC_FEATURES;
+	if (macb_is_gem(bp))
+		phydev->supported &= PHY_GBIT_FEATURES;
+	else
+		phydev->supported &= PHY_BASIC_FEATURES;
 
 	phydev->advertising = phydev->supported;
 
@@ -228,7 +310,7 @@ static int macb_mii_probe(struct net_device *dev)
 	return 0;
 }
 
-static int macb_mii_init(struct macb *bp)
+int macb_mii_init(struct macb *bp)
 {
 	struct macb_platform_data *pdata;
 	int err = -ENXIO, i;
@@ -284,6 +366,7 @@ err_out_free_mdiobus:
 err_out:
 	return err;
 }
+EXPORT_SYMBOL_GPL(macb_mii_init);
 
 static void macb_update_stats(struct macb *bp)
 {
@@ -297,93 +380,148 @@ static void macb_update_stats(struct macb *bp)
 			*p += __raw_readl(reg);
 }
 
-static void macb_tx(struct macb *bp)
+static int macb_halt_tx(struct macb *bp)
 {
-	unsigned int tail;
-	unsigned int head;
-	u32 status;
+	unsigned long	halt_time, timeout;
+	u32		status;
 
-	status = macb_readl(bp, TSR);
-	macb_writel(bp, TSR, status);
+	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
 
-	netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status);
+	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
+	do {
+		halt_time = jiffies;
+		status = macb_readl(bp, TSR);
+		if (!(status & MACB_BIT(TGO)))
+			return 0;
 
-	if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
-		int i;
-		netdev_err(bp->dev, "TX %s, resetting buffers\n",
-			   status & MACB_BIT(UND) ?
-			   "underrun" : "retry limit exceeded");
+		usleep_range(10, 250);
+	} while (time_before(halt_time, timeout));
 
-		/* Transfer ongoing, disable transmitter, to avoid confusion */
-		if (status & MACB_BIT(TGO))
-			macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
+	return -ETIMEDOUT;
+}
 
-		head = bp->tx_head;
+static void macb_tx_error_task(struct work_struct *work)
+{
+	struct macb	*bp = container_of(work, struct macb, tx_error_task);
+	struct macb_tx_skb	*tx_skb;
+	struct sk_buff		*skb;
+	unsigned int		tail;
 
-		/*Mark all the buffer as used to avoid sending a lost buffer*/
-		for (i = 0; i < TX_RING_SIZE; i++)
-			bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+	netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
+		    bp->tx_tail, bp->tx_head);
 
-		/* Add wrap bit */
-		bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+	/* Make sure nobody is trying to queue up new packets */
+	netif_stop_queue(bp->dev);
 
-		/* free transmit buffer in upper layer*/
-		for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
-			struct ring_info *rp = &bp->tx_skb[tail];
-			struct sk_buff *skb = rp->skb;
+	/*
+	 * Stop transmission now
+	 * (in case we have just queued new packets)
+	 */
+	if (macb_halt_tx(bp))
+		/* Just complain for now, reinitializing TX path can be good */
+		netdev_err(bp->dev, "BUG: halt tx timed out\n");
 
-			BUG_ON(skb == NULL);
+	/* No need for the lock here as nobody will interrupt us anymore */
 
-			rmb();
+	/*
+	 * Treat frames in TX queue including the ones that caused the error.
+	 * Free transmit buffers in upper layer.
+	 */
+	for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
+		struct macb_dma_desc	*desc;
+		u32			ctrl;
+
+		desc = macb_tx_desc(bp, tail);
+		ctrl = desc->ctrl;
+		tx_skb = macb_tx_skb(bp, tail);
+		skb = tx_skb->skb;
+
+		if (ctrl & MACB_BIT(TX_USED)) {
+			netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
+				    macb_tx_ring_wrap(tail), skb->data);
+			bp->stats.tx_packets++;
+			bp->stats.tx_bytes += skb->len;
+		} else {
+			/*
+			 * "Buffers exhausted mid-frame" errors may only happen
+			 * if the driver is buggy, so complain loudly about those.
+			 * Statistics are updated by hardware.
+			 */
+			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
+				netdev_err(bp->dev,
+					   "BUG: TX buffers exhausted mid-frame\n");
 
-			dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
-					 DMA_TO_DEVICE);
-			rp->skb = NULL;
-			dev_kfree_skb_irq(skb);
+			desc->ctrl = ctrl | MACB_BIT(TX_USED);
 		}
 
-		bp->tx_head = bp->tx_tail = 0;
-
-		/* Enable the transmitter again */
-		if (status & MACB_BIT(TGO))
-			macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
+		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
+				 DMA_TO_DEVICE);
+		tx_skb->skb = NULL;
+		dev_kfree_skb(skb);
 	}
 
-	if (!(status & MACB_BIT(COMP)))
-		/*
-		 * This may happen when a buffer becomes complete
-		 * between reading the ISR and scanning the
-		 * descriptors. Nothing to worry about.
-		 */
-		return;
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	/* Reinitialize the TX desc queue */
+	macb_writel(bp, TBQP, bp->tx_ring_dma);
+	/* Make TX ring reflect state of hardware */
+	bp->tx_head = bp->tx_tail = 0;
+
+	/* Now we are ready to start transmission again */
+	netif_wake_queue(bp->dev);
+
+	/* Housework before enabling TX IRQ */
+	macb_writel(bp, TSR, macb_readl(bp, TSR));
+	macb_writel(bp, IER, MACB_TX_INT_FLAGS);
+}
+
+static void macb_tx_interrupt(struct macb *bp)
+{
+	unsigned int tail;
+	unsigned int head;
+	u32 status;
+
+	status = macb_readl(bp, TSR);
+	macb_writel(bp, TSR, status);
+
+	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
+		(unsigned long)status);
 
 	head = bp->tx_head;
-	for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
-		struct ring_info *rp = &bp->tx_skb[tail];
-		struct sk_buff *skb = rp->skb;
-		u32 bufstat;
+	for (tail = bp->tx_tail; tail != head; tail++) {
+		struct macb_tx_skb	*tx_skb;
+		struct sk_buff		*skb;
+		struct macb_dma_desc	*desc;
+		u32			ctrl;
 
-		BUG_ON(skb == NULL);
+		desc = macb_tx_desc(bp, tail);
 
+		/* Make hw descriptor updates visible to CPU */
 		rmb();
-		bufstat = bp->tx_ring[tail].ctrl;
 
-		if (!(bufstat & MACB_BIT(TX_USED)))
+		ctrl = desc->ctrl;
+
+		if (!(ctrl & MACB_BIT(TX_USED)))
 			break;
 
-		netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n",
-			   tail, skb->data);
-		dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
+		tx_skb = macb_tx_skb(bp, tail);
+		skb = tx_skb->skb;
+
+		netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
+			macb_tx_ring_wrap(tail), skb->data);
+		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
 				 DMA_TO_DEVICE);
 		bp->stats.tx_packets++;
 		bp->stats.tx_bytes += skb->len;
-		rp->skb = NULL;
+		tx_skb->skb = NULL;
 		dev_kfree_skb_irq(skb);
 	}
 
 	bp->tx_tail = tail;
-	if (netif_queue_stopped(bp->dev) &&
-	    TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
+	if (netif_queue_stopped(bp->dev)
+	    && CIRC_CNT(bp->tx_head, bp->tx_tail,
+			TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
 		netif_wake_queue(bp->dev);
 }
 
@@ -392,31 +530,48 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 {
 	unsigned int len;
 	unsigned int frag;
-	unsigned int offset = 0;
+	unsigned int offset;
 	struct sk_buff *skb;
+	struct macb_dma_desc *desc;
 
-	len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
+	desc = macb_rx_desc(bp, last_frag);
+	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
 
-	netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
-		   first_frag, last_frag, len);
+	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+		macb_rx_ring_wrap(first_frag),
+		macb_rx_ring_wrap(last_frag), len);
 
-	skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET);
+	/*
+	 * The ethernet header starts NET_IP_ALIGN bytes into the
+	 * first buffer. Since the header is 14 bytes, this makes the
+	 * payload word-aligned.
+	 *
+	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
+	 * the two padding bytes into the skb so that we avoid hitting
+	 * the slowpath in memcpy(), and pull them off afterwards.
+	 */
+	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
 	if (!skb) {
 		bp->stats.rx_dropped++;
-		for (frag = first_frag; ; frag = NEXT_RX(frag)) {
-			bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+		for (frag = first_frag; ; frag++) {
+			desc = macb_rx_desc(bp, frag);
+			desc->addr &= ~MACB_BIT(RX_USED);
 			if (frag == last_frag)
 				break;
 		}
+
+		/* Make descriptor updates visible to hardware */
 		wmb();
+
 		return 1;
 	}
 
-	skb_reserve(skb, RX_OFFSET);
+	offset = 0;
+	len += NET_IP_ALIGN;
 	skb_checksum_none_assert(skb);
 	skb_put(skb, len);
 
-	for (frag = first_frag; ; frag = NEXT_RX(frag)) {
+	for (frag = first_frag; ; frag++) {
 		unsigned int frag_len = RX_BUFFER_SIZE;
 
 		if (offset + frag_len > len) {
@@ -424,22 +579,24 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 			frag_len = len - offset;
 		}
 		skb_copy_to_linear_data_offset(skb, offset,
-				(bp->rx_buffers +
-				 (RX_BUFFER_SIZE * frag)),
-				frag_len);
+				macb_rx_buffer(bp, frag), frag_len);
 		offset += RX_BUFFER_SIZE;
-		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
-		wmb();
+		desc = macb_rx_desc(bp, frag);
+		desc->addr &= ~MACB_BIT(RX_USED);
 
 		if (frag == last_frag)
 			break;
 	}
 
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	__skb_pull(skb, NET_IP_ALIGN);
 	skb->protocol = eth_type_trans(skb, bp->dev);
 
 	bp->stats.rx_packets++;
-	bp->stats.rx_bytes += len;
-	netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n",
+	bp->stats.rx_bytes += skb->len;
+	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
 		   skb->len, skb->csum);
 	netif_receive_skb(skb);
 
@@ -452,8 +609,12 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
 {
 	unsigned int frag;
 
-	for (frag = begin; frag != end; frag = NEXT_RX(frag))
-		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+	for (frag = begin; frag != end; frag++) {
+		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+		desc->addr &= ~MACB_BIT(RX_USED);
+	}
 
+	/* Make descriptor updates visible to hardware */
 	wmb();
 
 	/*
@@ -466,15 +627,18 @@ static int macb_rx(struct macb *bp, int budget)
 static int macb_rx(struct macb *bp, int budget)
 {
 	int received = 0;
-	unsigned int tail = bp->rx_tail;
+	unsigned int tail;
 	int first_frag = -1;
 
-	for (; budget > 0; tail = NEXT_RX(tail)) {
+	for (tail = bp->rx_tail; budget > 0; tail++) {
+		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
 		u32 addr, ctrl;
 
+		/* Make hw descriptor updates visible to CPU */
 		rmb();
-		addr = bp->rx_ring[tail].addr;
-		ctrl = bp->rx_ring[tail].ctrl;
+
+		addr = desc->addr;
+		ctrl = desc->ctrl;
 
 		if (!(addr & MACB_BIT(RX_USED)))
 			break;
@@ -517,7 +681,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
 
 	work_done = 0;
 
-	netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n",
+	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
 		   (unsigned long)status, budget);
 
 	work_done = macb_rx(bp, budget);
@@ -552,10 +716,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 	while (status) {
 		/* close possible race with dev_close */
 		if (unlikely(!netif_running(dev))) {
-			macb_writel(bp, IDR, ~0UL);
+			macb_writel(bp, IDR, -1);
 			break;
 		}
 
+		netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
+
 		if (status & MACB_RX_INT_FLAGS) {
 			/*
 			 * There's no point taking any more interrupts
@@ -567,14 +733,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 			macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
 
 			if (napi_schedule_prep(&bp->napi)) {
-				netdev_dbg(bp->dev, "scheduling RX softirq\n");
+				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
 				__napi_schedule(&bp->napi);
 			}
 		}
 
-		if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) |
-			    MACB_BIT(ISR_RLE)))
-			macb_tx(bp);
+		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
+			macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
+			schedule_work(&bp->tx_error_task);
+			break;
+		}
+
+		if (status & MACB_BIT(TCOMP))
+			macb_tx_interrupt(bp);
 
 		/*
 		 * Link change detection isn't possible with RMII, so we'll
@@ -626,11 +797,13 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct macb *bp = netdev_priv(dev);
 	dma_addr_t mapping;
 	unsigned int len, entry;
+	struct macb_dma_desc *desc;
+	struct macb_tx_skb *tx_skb;
 	u32 ctrl;
 	unsigned long flags;
 
-#ifdef DEBUG
-	netdev_dbg(bp->dev,
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+	netdev_vdbg(bp->dev,
 		   "start_xmit: len %u head %p data %p tail %p end %p\n",
 		   skb->len, skb->head, skb->data,
 		   skb_tail_pointer(skb), skb_end_pointer(skb));
@@ -642,7 +815,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
-	if (TX_BUFFS_AVAIL(bp) < 1) {
+	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
 		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&bp->lock, flags);
 		netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
@@ -651,13 +824,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	entry = bp->tx_head;
-	netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry);
+	entry = macb_tx_ring_wrap(bp->tx_head);
+	bp->tx_head++;
+	netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
 				 len, DMA_TO_DEVICE);
-	bp->tx_skb[entry].skb = skb;
-	bp->tx_skb[entry].mapping = mapping;
-	netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
+
+	tx_skb = &bp->tx_skb[entry];
+	tx_skb->skb = skb;
+	tx_skb->mapping = mapping;
+	netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
 		   skb->data, (unsigned long)mapping);
 
 	ctrl = MACB_BF(TX_FRMLEN, len);
@@ -665,18 +841,18 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (entry == (TX_RING_SIZE - 1))
 		ctrl |= MACB_BIT(TX_WRAP);
 
-	bp->tx_ring[entry].addr = mapping;
-	bp->tx_ring[entry].ctrl = ctrl;
-	wmb();
+	desc = &bp->tx_ring[entry];
+	desc->addr = mapping;
+	desc->ctrl = ctrl;
 
-	entry = NEXT_TX(entry);
-	bp->tx_head = entry;
+	/* Make newly initialized descriptor visible to hardware */
+	wmb();
 
 	skb_tx_timestamp(skb);
 
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 
-	if (TX_BUFFS_AVAIL(bp) < 1)
+	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
 		netif_stop_queue(dev);
 
 	spin_unlock_irqrestore(&bp->lock, flags);
@@ -712,7 +888,7 @@ static int macb_alloc_consistent(struct macb *bp)
 {
 	int size;
 
-	size = TX_RING_SIZE * sizeof(struct ring_info);
+	size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
 	bp->tx_skb = kmalloc(size, GFP_KERNEL);
 	if (!bp->tx_skb)
 		goto out_err;
@@ -775,9 +951,6 @@ static void macb_init_rings(struct macb *bp)
 
 static void macb_reset_hw(struct macb *bp)
 {
-	/* Make sure we have the write buffer for ourselves */
-	wmb();
-
 	/*
 	 * Disable RX and TX (XXX: Should we halt the transmission
 	 * more gracefully?)
@@ -788,11 +961,11 @@ static void macb_reset_hw(struct macb *bp)
 	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
 
 	/* Clear all status flags */
-	macb_writel(bp, TSR, ~0UL);
-	macb_writel(bp, RSR, ~0UL);
+	macb_writel(bp, TSR, -1);
+	macb_writel(bp, RSR, -1);
 
 	/* Disable all interrupts */
-	macb_writel(bp, IDR, ~0UL);
+	macb_writel(bp, IDR, -1);
 	macb_readl(bp, ISR);
 }
 
@@ -860,8 +1033,12 @@ static u32 macb_dbw(struct macb *bp)
 }
 
 /*
- * Configure the receive DMA engine to use the correct receive buffer size.
- * This is a configurable parameter for GEM.
+ * Configure the receive DMA engine
+ * - use the correct receive buffer size
+ * - set the possibility to use INCR16 bursts
+ *   (if not supported by FIFO, it will fallback to default)
+ * - set both rx/tx packet buffers to full memory size
+ * These are configurable parameters for GEM.
  */
 static void macb_configure_dma(struct macb *bp)
 {
@@ -870,6 +1047,8 @@ static void macb_configure_dma(struct macb *bp)
 	if (macb_is_gem(bp)) {
 		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
 		dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
+		dmacfg |= GEM_BF(FBLDO, 16);
+		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
 		gem_writel(bp, DMACFG, dmacfg);
 	}
 }
@@ -879,9 +1058,10 @@ static void macb_init_hw(struct macb *bp)
 	u32 config;
 
 	macb_reset_hw(bp);
-	__macb_set_hwaddr(bp);
+	macb_set_hwaddr(bp);
 
 	config = macb_mdc_clk_div(bp);
+	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
 	config |= MACB_BIT(PAE);		/* PAuse Enable */
 	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
 	config |= MACB_BIT(BIG);		/* Receive oversized frames */
@@ -891,6 +1071,8 @@ static void macb_init_hw(struct macb *bp)
 	config |= MACB_BIT(NBC);	/* No BroadCast */
 	config |= macb_dbw(bp);
 	macb_writel(bp, NCFGR, config);
+	bp->speed = SPEED_10;
+	bp->duplex = DUPLEX_HALF;
 
 	macb_configure_dma(bp);
 
@@ -902,13 +1084,8 @@ static void macb_init_hw(struct macb *bp)
 	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
 
 	/* Enable interrupts */
-	macb_writel(bp, IER, (MACB_BIT(RCOMP)
-			      | MACB_BIT(RXUBR)
-			      | MACB_BIT(ISR_TUND)
-			      | MACB_BIT(ISR_RLE)
-			      | MACB_BIT(TXERR)
-			      | MACB_BIT(TCOMP)
-			      | MACB_BIT(ISR_ROVR)
+	macb_writel(bp, IER, (MACB_RX_INT_FLAGS
+			      | MACB_TX_INT_FLAGS
 			      | MACB_BIT(HRESP)));
 
 }
@@ -996,7 +1173,7 @@ static void macb_sethashtable(struct net_device *dev)
 /*
  * Enable/Disable promiscuous and multicast modes.
  */
-static void macb_set_rx_mode(struct net_device *dev)
+void macb_set_rx_mode(struct net_device *dev)
 {
 	unsigned long cfg;
 	struct macb *bp = netdev_priv(dev);
@@ -1028,6 +1205,7 @@ static void macb_set_rx_mode(struct net_device *dev)
 
 	macb_writel(bp, NCFGR, cfg);
 }
+EXPORT_SYMBOL_GPL(macb_set_rx_mode);
 
 static int macb_open(struct net_device *dev)
 {
@@ -1043,9 +1221,6 @@ static int macb_open(struct net_device *dev)
 	if (!bp->phy_dev)
 		return -EAGAIN;
 
-	if (!is_valid_ether_addr(dev->dev_addr))
-		return -EADDRNOTAVAIL;
-
 	err = macb_alloc_consistent(bp);
 	if (err) {
 		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
@@ -1135,7 +1310,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
 	return nstat;
 }
 
-static struct net_device_stats *macb_get_stats(struct net_device *dev)
+struct net_device_stats *macb_get_stats(struct net_device *dev)
 {
 	struct macb *bp = netdev_priv(dev);
 	struct net_device_stats *nstat = &bp->stats;
@@ -1181,6 +1356,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
 
 	return nstat;
 }
+EXPORT_SYMBOL_GPL(macb_get_stats);
 
 static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
@@ -1204,25 +1380,55 @@ static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	return phy_ethtool_sset(phydev, cmd);
 }
 
-static void macb_get_drvinfo(struct net_device *dev,
-			     struct ethtool_drvinfo *info)
+static int macb_get_regs_len(struct net_device *netdev)
+{
+	return MACB_GREGS_NBR * sizeof(u32);
+}
+
+static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+			  void *p)
 {
 	struct macb *bp = netdev_priv(dev);
+	unsigned int tail, head;
+	u32 *regs_buff = p;
+
+	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
+			| MACB_GREGS_VERSION;
+
+	tail = macb_tx_ring_wrap(bp->tx_tail);
+	head = macb_tx_ring_wrap(bp->tx_head);
+
+	regs_buff[0]  = macb_readl(bp, NCR);
+	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
+	regs_buff[2]  = macb_readl(bp, NSR);
+	regs_buff[3]  = macb_readl(bp, TSR);
+	regs_buff[4]  = macb_readl(bp, RBQP);
+	regs_buff[5]  = macb_readl(bp, TBQP);
+	regs_buff[6]  = macb_readl(bp, RSR);
+	regs_buff[7]  = macb_readl(bp, IMR);
 
-	strcpy(info->driver, bp->pdev->dev.driver->name);
-	strcpy(info->version, "$Revision: 1.14 $");
-	strcpy(info->bus_info, dev_name(&bp->pdev->dev));
+	regs_buff[8]  = tail;
+	regs_buff[9]  = head;
+	regs_buff[10] = macb_tx_dma(bp, tail);
+	regs_buff[11] = macb_tx_dma(bp, head);
+
+	if (macb_is_gem(bp)) {
+		regs_buff[12] = gem_readl(bp, USRIO);
+		regs_buff[13] = gem_readl(bp, DMACFG);
+	}
 }
 
-static const struct ethtool_ops macb_ethtool_ops = {
+const struct ethtool_ops macb_ethtool_ops = {
 	.get_settings		= macb_get_settings,
 	.set_settings		= macb_set_settings,
-	.get_drvinfo		= macb_get_drvinfo,
+	.get_regs_len		= macb_get_regs_len,
+	.get_regs		= macb_get_regs,
 	.get_link		= ethtool_op_get_link,
 	.get_ts_info		= ethtool_op_get_ts_info,
 };
+EXPORT_SYMBOL_GPL(macb_ethtool_ops);
 
-static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct macb *bp = netdev_priv(dev);
 	struct phy_device *phydev = bp->phy_dev;
@@ -1235,6 +1441,7 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
 	return phy_mii_ioctl(phydev, rq, cmd);
 }
+EXPORT_SYMBOL_GPL(macb_ioctl);
 
 static const struct net_device_ops macb_netdev_ops = {
 	.ndo_open		= macb_open,
@@ -1306,6 +1513,7 @@ static int __init macb_probe(struct platform_device *pdev)
 	struct phy_device *phydev;
 	u32 config;
 	int err = -ENXIO;
+	struct pinctrl *pinctrl;
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!regs) {
@@ -1313,6 +1521,15 @@ static int __init macb_probe(struct platform_device *pdev)
 		goto err_out;
 	}
 
+	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(pinctrl)) {
+		err = PTR_ERR(pinctrl);
+		if (err == -EPROBE_DEFER)
+			goto err_out;
+
+		dev_warn(&pdev->dev, "No pinctrl provided\n");
+	}
+
 	err = -ENOMEM;
 	dev = alloc_etherdev(sizeof(*bp));
 	if (!dev)
@@ -1328,6 +1545,7 @@ static int __init macb_probe(struct platform_device *pdev)
 	bp->dev = dev;
 
 	spin_lock_init(&bp->lock);
+	INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
 
 	bp->pclk = clk_get(&pdev->dev, "pclk");
 	if (IS_ERR(bp->pclk)) {
@@ -1384,7 +1602,9 @@ static int __init macb_probe(struct platform_device *pdev)
 		bp->phy_interface = err;
 	}
 
-	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
+	if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+		macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
+	else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
 #if defined(CONFIG_ARCH_AT91)
 		macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
 					       MACB_BIT(CLKEN)));
@@ -1398,8 +1618,6 @@ static int __init macb_probe(struct platform_device *pdev)
 		macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
 #endif
 
-	bp->tx_pending = DEF_TX_RING_PENDING;
-
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");