Diffstat (limited to 'drivers/net/arm/ixp4xx_eth.c')
 drivers/net/arm/ixp4xx_eth.c | 344 ++++++++++++++++++++++++-------------------------
 1 file changed, 176 insertions(+), 168 deletions(-)
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index e2d702b8b2e4..26af411fc428 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -30,12 +30,11 @@
 #include <linux/etherdevice.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
 #include <linux/platform_device.h>
 #include <mach/npe.h>
 #include <mach/qmgr.h>
 
-#define DEBUG_QUEUES		0
 #define DEBUG_DESC		0
 #define DEBUG_RX		0
 #define DEBUG_TX		0
@@ -59,7 +58,6 @@
 #define NAPI_WEIGHT		16
 #define MDIO_INTERVAL		(3 * HZ)
 #define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
-#define MAX_MII_RESET_RETRIES	100 /* mdio_read() cycles, typically 4 */
 #define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */
 
 #define NPE_ID(port_id)		((port_id) >> 4)
@@ -164,15 +162,14 @@ struct port {
 	struct npe *npe;
 	struct net_device *netdev;
 	struct napi_struct napi;
-	struct net_device_stats stat;
-	struct mii_if_info mii;
-	struct delayed_work mdio_thread;
+	struct phy_device *phydev;
 	struct eth_plat_info *plat;
 	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
 	struct desc *desc_tab;	/* coherent */
 	u32 desc_tab_phys;
 	int id;			/* logical port ID */
-	u16 mii_bmcr;
+	int speed, duplex;
+	u8 firmware[4];
 };
 
 /* NPE message structure */
@@ -243,19 +240,20 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
 
 static spinlock_t mdio_lock;
 static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
+struct mii_bus *mdio_bus;
 static int ports_open;
 static struct port *npe_port_tab[MAX_NPES];
 static struct dma_pool *dma_pool;
 
 
-static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
-		    int write, u16 cmd)
+static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
+			   int write, u16 cmd)
 {
 	int cycles = 0;
 
 	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
-		printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name);
-		return 0;
+		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
+		return -1;
 	}
 
 	if (write) {
@@ -274,107 +272,119 @@ static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
 	}
 
 	if (cycles == MAX_MDIO_RETRIES) {
-		printk(KERN_ERR "%s: MII write failed\n", dev->name);
-		return 0;
+		printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
+		       phy_id);
+		return -1;
 	}
 
 #if DEBUG_MDIO
-	printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name,
-	       cycles);
+	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
+	       phy_id, write ? "write" : "read", cycles);
 #endif
 
 	if (write)
 		return 0;
 
 	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
-		printk(KERN_ERR "%s: MII read failed\n", dev->name);
-		return 0;
+#if DEBUG_MDIO
+		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
+		       phy_id);
+#endif
+		return 0xFFFF; /* don't return error */
 	}
 
 	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
-		(__raw_readl(&mdio_regs->mdio_status[1]) << 8);
+		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
 }
 
-static int mdio_read(struct net_device *dev, int phy_id, int location)
+static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
 {
 	unsigned long flags;
-	u16 val;
+	int ret;
 
 	spin_lock_irqsave(&mdio_lock, flags);
-	val = mdio_cmd(dev, phy_id, location, 0, 0);
+	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
 	spin_unlock_irqrestore(&mdio_lock, flags);
-	return val;
+#if DEBUG_MDIO
+	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
+	       phy_id, location, ret);
+#endif
+	return ret;
 }
 
-static void mdio_write(struct net_device *dev, int phy_id, int location,
-		       int val)
+static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
+			     u16 val)
 {
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&mdio_lock, flags);
-	mdio_cmd(dev, phy_id, location, 1, val);
+	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
 	spin_unlock_irqrestore(&mdio_lock, flags);
+#if DEBUG_MDIO
+	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
+	       bus->name, phy_id, location, val, ret);
+#endif
+	return ret;
 }
 
-static void phy_reset(struct net_device *dev, int phy_id)
+static int ixp4xx_mdio_register(void)
 {
-	struct port *port = netdev_priv(dev);
-	int cycles = 0;
+	int err;
 
-	mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET);
+	if (!(mdio_bus = mdiobus_alloc()))
+		return -ENOMEM;
 
-	while (cycles < MAX_MII_RESET_RETRIES) {
-		if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) {
-#if DEBUG_MDIO
-			printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n",
-			       dev->name, cycles);
-#endif
-			return;
-		}
-		udelay(1);
-		cycles++;
-	}
+	/* All MII PHY accesses use NPE-B Ethernet registers */
+	spin_lock_init(&mdio_lock);
+	mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
+
+	mdio_bus->name = "IXP4xx MII Bus";
+	mdio_bus->read = &ixp4xx_mdio_read;
+	mdio_bus->write = &ixp4xx_mdio_write;
+	strcpy(mdio_bus->id, "0");
 
-	printk(KERN_ERR "%s: MII reset failed\n", dev->name);
+	if ((err = mdiobus_register(mdio_bus)))
+		mdiobus_free(mdio_bus);
+	return err;
 }
 
-static void eth_set_duplex(struct port *port)
+static void ixp4xx_mdio_remove(void)
 {
-	if (port->mii.full_duplex)
-		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
-			     &port->regs->tx_control[0]);
-	else
-		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
-			     &port->regs->tx_control[0]);
+	mdiobus_unregister(mdio_bus);
+	mdiobus_free(mdio_bus);
 }
 
 
-static void phy_check_media(struct port *port, int init)
+static void ixp4xx_adjust_link(struct net_device *dev)
 {
-	if (mii_check_media(&port->mii, 1, init))
-		eth_set_duplex(port);
-	if (port->mii.force_media) { /* mii_check_media() doesn't work */
-		struct net_device *dev = port->netdev;
-		int cur_link = mii_link_ok(&port->mii);
-		int prev_link = netif_carrier_ok(dev);
+	struct port *port = netdev_priv(dev);
+	struct phy_device *phydev = port->phydev;
 
-		if (!prev_link && cur_link) {
-			printk(KERN_INFO "%s: link up\n", dev->name);
-			netif_carrier_on(dev);
-		} else if (prev_link && !cur_link) {
+	if (!phydev->link) {
+		if (port->speed) {
+			port->speed = 0;
 			printk(KERN_INFO "%s: link down\n", dev->name);
-			netif_carrier_off(dev);
 		}
+		return;
 	}
-}
 
+	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
+		return;
 
-static void mdio_thread(struct work_struct *work)
-{
-	struct port *port = container_of(work, struct port, mdio_thread.work);
+	port->speed = phydev->speed;
+	port->duplex = phydev->duplex;
+
+	if (port->duplex)
+		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
+	else
+		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
 
-	phy_check_media(port, 0);
-	schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
+	printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
+	       dev->name, port->speed, port->duplex ? "full" : "half");
 }
 
 
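For context: the two hunks above follow the standard 2.6.29-era phylib pattern. The driver publishes its MDIO read/write primitives through a registered struct mii_bus, and the PHY state machine then invokes the adjust_link() callback whenever link, speed, or duplex changes. A minimal sketch of that registration pattern under the same kernel API, with hypothetical demo_* names rather than anything taken from this commit:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>

static struct mii_bus *demo_bus;

/* Read one 16-bit MII register; 0xFFFF conventionally means "no PHY here". */
static int demo_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	return 0xFFFF;			/* talk to the hardware here */
}

/* Write one MII register; return 0 on success or a negative errno. */
static int demo_mdio_write(struct mii_bus *bus, int phy_id, int location,
			   u16 val)
{
	return 0;			/* talk to the hardware here */
}

static int __init demo_mdio_init(void)
{
	int err;

	if (!(demo_bus = mdiobus_alloc()))
		return -ENOMEM;
	demo_bus->name = "demo MII bus";
	demo_bus->read = demo_mdio_read;
	demo_bus->write = demo_mdio_write;
	strcpy(demo_bus->id, "0");	/* bus id, later matched by phy_connect() */
	if ((err = mdiobus_register(demo_bus)))
		mdiobus_free(demo_bus);	/* registration failed, give the bus back */
	return err;
}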
@@ -412,47 +422,13 @@ static inline void debug_desc(u32 phys, struct desc *desc)
 #endif
 }
 
-static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
-{
-#if DEBUG_QUEUES
-	static struct {
-		int queue;
-		char *name;
-	} names[] = {
-		{ TX_QUEUE(0x10), "TX#0 " },
-		{ TX_QUEUE(0x20), "TX#1 " },
-		{ TX_QUEUE(0x00), "TX#2 " },
-		{ RXFREE_QUEUE(0x10), "RX-free#0 " },
-		{ RXFREE_QUEUE(0x20), "RX-free#1 " },
-		{ RXFREE_QUEUE(0x00), "RX-free#2 " },
-		{ TXDONE_QUEUE, "TX-done " },
-	};
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(names); i++)
-		if (names[i].queue == queue)
-			break;
-
-	printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
-	       i < ARRAY_SIZE(names) ? names[i].name : "",
-	       is_get ? "->" : "<-", phys);
-#endif
-}
-
-static inline u32 queue_get_entry(unsigned int queue)
-{
-	u32 phys = qmgr_get_entry(queue);
-	debug_queue(queue, 1, phys);
-	return phys;
-}
-
 static inline int queue_get_desc(unsigned int queue, struct port *port,
 				 int is_tx)
 {
 	u32 phys, tab_phys, n_desc;
 	struct desc *tab;
 
-	if (!(phys = queue_get_entry(queue)))
+	if (!(phys = qmgr_get_entry(queue)))
 		return -1;
 
 	phys &= ~0x1F; /* mask out non-address bits */
@@ -468,7 +444,6 @@ static inline int queue_get_desc(unsigned int queue, struct port *port,
 static inline void queue_put_desc(unsigned int queue, u32 phys,
 				  struct desc *desc)
 {
-	debug_queue(queue, 0, phys);
 	debug_desc(phys, desc);
 	BUG_ON(phys & 0x1F);
 	qmgr_put_entry(queue, phys);
@@ -498,7 +473,7 @@ static void eth_rx_irq(void *pdev)
 	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
 #endif
 	qmgr_disable_irq(port->plat->rxq);
-	netif_rx_schedule(dev, &port->napi);
+	netif_rx_schedule(&port->napi);
 }
 
 static int eth_poll(struct napi_struct *napi, int budget)
@@ -526,7 +501,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
 		printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
 		       dev->name);
 #endif
-		netif_rx_complete(dev, napi);
+		netif_rx_complete(napi);
 		qmgr_enable_irq(rxq);
 		if (!qmgr_stat_empty(rxq) &&
 		    netif_rx_reschedule(dev, napi)) {
@@ -562,7 +537,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
 #endif
 
 		if (!skb) {
-			port->stat.rx_dropped++;
+			dev->stats.rx_dropped++;
 			/* put the desc back on RX-ready queue */
 			desc->buf_len = MAX_MRU;
 			desc->pkt_len = 0;
@@ -588,9 +563,8 @@ static int eth_poll(struct napi_struct *napi, int budget)
 		debug_pkt(dev, "eth_poll", skb->data, skb->len);
 
 		skb->protocol = eth_type_trans(skb, dev);
-		dev->last_rx = jiffies;
-		port->stat.rx_packets++;
-		port->stat.rx_bytes += skb->len;
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb->len;
 		netif_receive_skb(skb);
 
 		/* put the new buffer on RX-free queue */
@@ -618,7 +592,7 @@ static void eth_txdone_irq(void *unused)
 #if DEBUG_TX
 	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
 #endif
-	while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) {
+	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
 		u32 npe_id, n_desc;
 		struct port *port;
 		struct desc *desc;
@@ -635,8 +609,8 @@ static void eth_txdone_irq(void *unused)
 		debug_desc(phys, desc);
 
 		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
-			port->stat.tx_packets++;
-			port->stat.tx_bytes += desc->pkt_len;
+			port->netdev->stats.tx_packets++;
+			port->netdev->stats.tx_bytes += desc->pkt_len;
 
 			dma_unmap_tx(port, desc);
 #if DEBUG_TX
@@ -674,7 +648,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (unlikely(skb->len > MAX_MRU)) {
 		dev_kfree_skb(skb);
-		port->stat.tx_errors++;
+		dev->stats.tx_errors++;
 		return NETDEV_TX_OK;
 	}
 
@@ -690,7 +664,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	bytes = ALIGN(offset + len, 4);
 	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
 		dev_kfree_skb(skb);
-		port->stat.tx_dropped++;
+		dev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
 	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
@@ -704,7 +678,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
 #else
 		kfree(mem);
 #endif
-		port->stat.tx_dropped++;
+		dev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
 
@@ -747,12 +721,6 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-static struct net_device_stats *eth_stats(struct net_device *dev)
-{
-	struct port *port = netdev_priv(dev);
-	return &port->stat;
-}
-
 static void eth_set_mcast_list(struct net_device *dev)
 {
 	struct port *port = netdev_priv(dev);
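A note on the eth_stats() removal above: since 2.6.26, struct net_device embeds its own net_device_stats, and the networking core falls back to the embedded &dev->stats when a driver supplies no stats hook. That is why the earlier hunks can simply count into dev->stats (or port->netdev->stats from the TX-done context) and this accessor can go away. Illustrative fragment only, not from this commit:

#include <linux/netdevice.h>

/* count into the stats embedded in struct net_device; no private copy,
 * no get_stats callback needed */
static void demo_count_rx(struct net_device *dev, unsigned int len)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}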
@@ -786,41 +754,80 @@ static void eth_set_mcast_list(struct net_device *dev)
 static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
 	struct port *port = netdev_priv(dev);
-	unsigned int duplex_chg;
-	int err;
 
 	if (!netif_running(dev))
 		return -EINVAL;
-	err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg);
-	if (duplex_chg)
-		eth_set_duplex(port);
-	return err;
+	return phy_mii_ioctl(port->phydev, if_mii(req), cmd);
+}
+
+/* ethtool support */
+
+static void ixp4xx_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
+{
+	struct port *port = netdev_priv(dev);
+	strcpy(info->driver, DRV_NAME);
+	snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
+		 port->firmware[0], port->firmware[1],
+		 port->firmware[2], port->firmware[3]);
+	strcpy(info->bus_info, "internal");
 }
 
+static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct port *port = netdev_priv(dev);
+	return phy_ethtool_gset(port->phydev, cmd);
+}
+
+static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct port *port = netdev_priv(dev);
+	return phy_ethtool_sset(port->phydev, cmd);
+}
+
+static int ixp4xx_nway_reset(struct net_device *dev)
+{
+	struct port *port = netdev_priv(dev);
+	return phy_start_aneg(port->phydev);
+}
+
+static struct ethtool_ops ixp4xx_ethtool_ops = {
+	.get_drvinfo = ixp4xx_get_drvinfo,
+	.get_settings = ixp4xx_get_settings,
+	.set_settings = ixp4xx_set_settings,
+	.nway_reset = ixp4xx_nway_reset,
+	.get_link = ethtool_op_get_link,
+};
+
 
 static int request_queues(struct port *port)
 {
 	int err;
 
-	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0);
+	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
+				 "%s:RX-free", port->netdev->name);
 	if (err)
 		return err;
 
-	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0);
+	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
+				 "%s:RX", port->netdev->name);
 	if (err)
 		goto rel_rxfree;
 
-	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0);
+	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
+				 "%s:TX", port->netdev->name);
 	if (err)
 		goto rel_rx;
 
-	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0);
+	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
+				 "%s:TX-ready", port->netdev->name);
 	if (err)
 		goto rel_tx;
 
 	/* TX-done queue handles skbs sent out by the NPEs */
 	if (!ports_open) {
-		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0);
+		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
+					 "%s:TX-done", DRV_NAME);
 		if (err)
 			goto rel_txready;
 	}
@@ -944,10 +951,12 @@ static int eth_open(struct net_device *dev)
 			       npe_name(npe));
 			return -EIO;
 		}
+		port->firmware[0] = msg.byte4;
+		port->firmware[1] = msg.byte5;
+		port->firmware[2] = msg.byte6;
+		port->firmware[3] = msg.byte7;
 	}
 
-	mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);
-
 	memset(&msg, 0, sizeof(msg));
 	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
 	msg.eth_id = port->id;
@@ -985,6 +994,9 @@ static int eth_open(struct net_device *dev)
 		return err;
 	}
 
+	port->speed = 0;	/* force "link up" message */
+	phy_start(port->phydev);
+
 	for (i = 0; i < ETH_ALEN; i++)
 		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
 	__raw_writel(0x08, &port->regs->random_seed);
@@ -1012,10 +1024,8 @@ static int eth_open(struct net_device *dev)
 	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
 
 	napi_enable(&port->napi);
-	phy_check_media(port, 1);
 	eth_set_mcast_list(dev);
 	netif_start_queue(dev);
-	schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
 
 	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
 		     eth_rx_irq, dev);
@@ -1026,7 +1036,7 @@ static int eth_open(struct net_device *dev)
 	}
 	ports_open++;
 	/* we may already have RX data, enables IRQ */
-	netif_rx_schedule(dev, &port->napi);
+	netif_rx_schedule(&port->napi);
 	return 0;
 }
 
@@ -1106,25 +1116,31 @@ static int eth_close(struct net_device *dev)
 		printk(KERN_CRIT "%s: unable to disable loopback\n",
 		       dev->name);
 
-	port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) &
-			 ~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
-	mdio_write(dev, port->plat->phy, MII_BMCR,
-		   port->mii_bmcr | BMCR_PDOWN);
+	phy_stop(port->phydev);
 
 	if (!ports_open)
 		qmgr_disable_irq(TXDONE_QUEUE);
-	cancel_rearming_delayed_work(&port->mdio_thread);
 	destroy_queues(port);
 	release_queues(port);
 	return 0;
 }
 
+static const struct net_device_ops ixp4xx_netdev_ops = {
+	.ndo_open = eth_open,
+	.ndo_stop = eth_close,
+	.ndo_start_xmit = eth_xmit,
+	.ndo_set_multicast_list = eth_set_mcast_list,
+	.ndo_do_ioctl = eth_ioctl,
+};
+
 static int __devinit eth_init_one(struct platform_device *pdev)
 {
 	struct port *port;
 	struct net_device *dev;
 	struct eth_plat_info *plat = pdev->dev.platform_data;
 	u32 regs_phys;
+	char phy_id[BUS_ID_SIZE];
 	int err;
 
 	if (!(dev = alloc_etherdev(sizeof(struct port))))
@@ -1153,12 +1169,8 @@ static int __devinit eth_init_one(struct platform_device *pdev)
 		goto err_free;
 	}
 
-	dev->open = eth_open;
-	dev->hard_start_xmit = eth_xmit;
-	dev->stop = eth_close;
-	dev->get_stats = eth_stats;
-	dev->do_ioctl = eth_ioctl;
-	dev->set_multicast_list = eth_set_mcast_list;
+	dev->netdev_ops = &ixp4xx_netdev_ops;
+	dev->ethtool_ops = &ixp4xx_ethtool_ops;
 	dev->tx_queue_len = 100;
 
 	netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
@@ -1191,22 +1203,19 @@ static int __devinit eth_init_one(struct platform_device *pdev)
 	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
 	udelay(50);
 
-	port->mii.dev = dev;
-	port->mii.mdio_read = mdio_read;
-	port->mii.mdio_write = mdio_write;
-	port->mii.phy_id = plat->phy;
-	port->mii.phy_id_mask = 0x1F;
-	port->mii.reg_num_mask = 0x1F;
+	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy);
+	port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
+				   PHY_INTERFACE_MODE_MII);
+	if (IS_ERR(port->phydev)) {
+		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+		return PTR_ERR(port->phydev);
+	}
+
+	port->phydev->irq = PHY_POLL;
 
 	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
 	       npe_name(port->npe));
 
-	phy_reset(dev, plat->phy);
-	port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
-			 ~(BMCR_RESET | BMCR_PDOWN);
-	mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);
-
-	INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
 	return 0;
 
 err_unreg:
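On the phy_connect() call above: the PHY is addressed by a string id built with PHY_ID_FMT, which in this kernel generation expands to "%s:%02x", so bus id "0" (matching the strcpy(mdio_bus->id, "0") in the MDIO registration hunk) plus, say, PHY address 3 yields "0:03". A hedged sketch of the attach sequence, with hypothetical demo_* names:

#include <linux/phy.h>

static void demo_adjust_link(struct net_device *dev)
{
	/* phylib calls this on every link, speed, or duplex change */
}

static int demo_attach_phy(struct net_device *dev, int phy_addr)
{
	char phy_id[BUS_ID_SIZE];
	struct phy_device *phydev;

	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", phy_addr);
	phydev = phy_connect(dev, phy_id, &demo_adjust_link, 0,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);
	phydev->irq = PHY_POLL;	/* poll, no PHY interrupt line is wired up */
	return 0;
}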
@@ -1232,7 +1241,7 @@ static int __devexit eth_remove_one(struct platform_device *pdev)
 	return 0;
 }
 
-static struct platform_driver drv = {
+static struct platform_driver ixp4xx_eth_driver = {
 	.driver.name	= DRV_NAME,
 	.probe		= eth_init_one,
 	.remove		= eth_remove_one,
@@ -1240,20 +1249,19 @@ static struct platform_driver drv = {
 
 static int __init eth_init_module(void)
 {
+	int err;
 	if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
 		return -ENOSYS;
 
-	/* All MII PHY accesses use NPE-B Ethernet registers */
-	spin_lock_init(&mdio_lock);
-	mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
-	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
-
-	return platform_driver_register(&drv);
+	if ((err = ixp4xx_mdio_register()))
+		return err;
+	return platform_driver_register(&ixp4xx_eth_driver);
 }
 
 static void __exit eth_cleanup_module(void)
 {
-	platform_driver_unregister(&drv);
+	platform_driver_unregister(&ixp4xx_eth_driver);
+	ixp4xx_mdio_remove();
 }
 
 MODULE_AUTHOR("Krzysztof Halasa");