Diffstat (limited to 'drivers/net/arm')

 drivers/net/arm/Kconfig      |   2 +-
 drivers/net/arm/ixp4xx_eth.c | 337 +++++++++++++++++++++----------------------
 2 files changed, 174 insertions(+), 165 deletions(-)
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index abe17762e6f5..2895db13bfa4 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -59,7 +59,7 @@ config EP93XX_ETH
 config IXP4XX_ETH
 	tristate "Intel IXP4xx Ethernet support"
 	depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
-	select MII
+	select PHYLIB
 	help
 	  Say Y here if you want to use built-in Ethernet ports
 	  on IXP4xx processor.
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index b03609f2e90f..26af411fc428 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -30,12 +30,11 @@
 #include <linux/etherdevice.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
 #include <linux/platform_device.h>
 #include <mach/npe.h>
 #include <mach/qmgr.h>
 
-#define DEBUG_QUEUES		0
 #define DEBUG_DESC		0
 #define DEBUG_RX		0
 #define DEBUG_TX		0
@@ -59,7 +58,6 @@
 #define NAPI_WEIGHT		16
 #define MDIO_INTERVAL		(3 * HZ)
 #define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
-#define MAX_MII_RESET_RETRIES	100 /* mdio_read() cycles, typically 4 */
 #define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */
 
 #define NPE_ID(port_id)		((port_id) >> 4)
@@ -164,15 +162,14 @@ struct port {
 	struct npe *npe;
 	struct net_device *netdev;
 	struct napi_struct napi;
-	struct net_device_stats stat;
-	struct mii_if_info mii;
-	struct delayed_work mdio_thread;
+	struct phy_device *phydev;
 	struct eth_plat_info *plat;
 	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
 	struct desc *desc_tab;	/* coherent */
 	u32 desc_tab_phys;
 	int id;			/* logical port ID */
-	u16 mii_bmcr;
+	int speed, duplex;
+	u8 firmware[4];
 };
 
 /* NPE message structure */
@@ -243,19 +240,20 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
 
 static spinlock_t mdio_lock;
 static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
+struct mii_bus *mdio_bus;
 static int ports_open;
 static struct port *npe_port_tab[MAX_NPES];
 static struct dma_pool *dma_pool;
 
 
-static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
-		    int write, u16 cmd)
+static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
+			   int write, u16 cmd)
 {
 	int cycles = 0;
 
 	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
-		printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name);
-		return 0;
+		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
+		return -1;
 	}
 
 	if (write) {
@@ -274,107 +272,119 @@ static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
 	}
 
 	if (cycles == MAX_MDIO_RETRIES) {
-		printk(KERN_ERR "%s: MII write failed\n", dev->name);
-		return 0;
+		printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
+		       phy_id);
+		return -1;
 	}
 
 #if DEBUG_MDIO
-	printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name,
-	       cycles);
+	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
+	       phy_id, write ? "write" : "read", cycles);
 #endif
 
 	if (write)
 		return 0;
 
 	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
-		printk(KERN_ERR "%s: MII read failed\n", dev->name);
-		return 0;
+#if DEBUG_MDIO
+		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
+		       phy_id);
+#endif
+		return 0xFFFF; /* don't return error */
 	}
 
 	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
-		(__raw_readl(&mdio_regs->mdio_status[1]) << 8);
+		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
 }
 
-static int mdio_read(struct net_device *dev, int phy_id, int location)
+static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
 {
 	unsigned long flags;
-	u16 val;
+	int ret;
 
 	spin_lock_irqsave(&mdio_lock, flags);
-	val = mdio_cmd(dev, phy_id, location, 0, 0);
+	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
 	spin_unlock_irqrestore(&mdio_lock, flags);
-	return val;
+#if DEBUG_MDIO
+	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
+	       phy_id, location, ret);
+#endif
+	return ret;
 }
 
-static void mdio_write(struct net_device *dev, int phy_id, int location,
-		       int val)
+static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
+			     u16 val)
 {
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&mdio_lock, flags);
-	mdio_cmd(dev, phy_id, location, 1, val);
+	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
 	spin_unlock_irqrestore(&mdio_lock, flags);
+#if DEBUG_MDIO
+	printk(KERN_DEBUG "%s #%i: MII read [%i] <- 0x%X, err = %i\n",
+	       bus->name, phy_id, location, val, ret);
+#endif
+	return ret;
 }
 
-static void phy_reset(struct net_device *dev, int phy_id)
+static int ixp4xx_mdio_register(void)
 {
-	struct port *port = netdev_priv(dev);
-	int cycles = 0;
+	int err;
 
-	mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET);
+	if (!(mdio_bus = mdiobus_alloc()))
+		return -ENOMEM;
 
-	while (cycles < MAX_MII_RESET_RETRIES) {
-		if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) {
-#if DEBUG_MDIO
-			printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n",
-			       dev->name, cycles);
-#endif
-			return;
-		}
-		udelay(1);
-		cycles++;
-	}
+	/* All MII PHY accesses use NPE-B Ethernet registers */
+	spin_lock_init(&mdio_lock);
+	mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
+
+	mdio_bus->name = "IXP4xx MII Bus";
+	mdio_bus->read = &ixp4xx_mdio_read;
+	mdio_bus->write = &ixp4xx_mdio_write;
+	strcpy(mdio_bus->id, "0");
 
-	printk(KERN_ERR "%s: MII reset failed\n", dev->name);
+	if ((err = mdiobus_register(mdio_bus)))
+		mdiobus_free(mdio_bus);
+	return err;
 }
 
-static void eth_set_duplex(struct port *port)
+static void ixp4xx_mdio_remove(void)
 {
-	if (port->mii.full_duplex)
-		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
-			     &port->regs->tx_control[0]);
-	else
-		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
-			     &port->regs->tx_control[0]);
+	mdiobus_unregister(mdio_bus);
+	mdiobus_free(mdio_bus);
 }
 
 
-static void phy_check_media(struct port *port, int init)
+static void ixp4xx_adjust_link(struct net_device *dev)
 {
-	if (mii_check_media(&port->mii, 1, init))
-		eth_set_duplex(port);
-	if (port->mii.force_media) { /* mii_check_media() doesn't work */
-		struct net_device *dev = port->netdev;
-		int cur_link = mii_link_ok(&port->mii);
-		int prev_link = netif_carrier_ok(dev);
-
-		if (!prev_link && cur_link) {
-			printk(KERN_INFO "%s: link up\n", dev->name);
-			netif_carrier_on(dev);
-		} else if (prev_link && !cur_link) {
+	struct port *port = netdev_priv(dev);
+	struct phy_device *phydev = port->phydev;
+
+	if (!phydev->link) {
+		if (port->speed) {
+			port->speed = 0;
 			printk(KERN_INFO "%s: link down\n", dev->name);
-			netif_carrier_off(dev);
 		}
+		return;
 	}
-}
 
+	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
+		return;
 
-static void mdio_thread(struct work_struct *work)
-{
-	struct port *port = container_of(work, struct port, mdio_thread.work);
+	port->speed = phydev->speed;
+	port->duplex = phydev->duplex;
+
+	if (port->duplex)
+		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
+	else
+		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
 
-	phy_check_media(port, 0);
-	schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
+	printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
+	       dev->name, port->speed, port->duplex ? "full" : "half");
 }
 
 
@@ -412,47 +422,13 @@ static inline void debug_desc(u32 phys, struct desc *desc)
 #endif
 }
 
-static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
-{
-#if DEBUG_QUEUES
-	static struct {
-		int queue;
-		char *name;
-	} names[] = {
-		{ TX_QUEUE(0x10), "TX#0 " },
-		{ TX_QUEUE(0x20), "TX#1 " },
-		{ TX_QUEUE(0x00), "TX#2 " },
-		{ RXFREE_QUEUE(0x10), "RX-free#0 " },
-		{ RXFREE_QUEUE(0x20), "RX-free#1 " },
-		{ RXFREE_QUEUE(0x00), "RX-free#2 " },
-		{ TXDONE_QUEUE, "TX-done " },
-	};
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(names); i++)
-		if (names[i].queue == queue)
-			break;
-
-	printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
-	       i < ARRAY_SIZE(names) ? names[i].name : "",
-	       is_get ? "->" : "<-", phys);
-#endif
-}
-
-static inline u32 queue_get_entry(unsigned int queue)
-{
-	u32 phys = qmgr_get_entry(queue);
-	debug_queue(queue, 1, phys);
-	return phys;
-}
-
 static inline int queue_get_desc(unsigned int queue, struct port *port,
 				 int is_tx)
 {
 	u32 phys, tab_phys, n_desc;
 	struct desc *tab;
 
-	if (!(phys = queue_get_entry(queue)))
+	if (!(phys = qmgr_get_entry(queue)))
 		return -1;
 
 	phys &= ~0x1F; /* mask out non-address bits */
@@ -468,7 +444,6 @@ static inline int queue_get_desc(unsigned int queue, struct port *port,
 static inline void queue_put_desc(unsigned int queue, u32 phys,
 				  struct desc *desc)
 {
-	debug_queue(queue, 0, phys);
 	debug_desc(phys, desc);
 	BUG_ON(phys & 0x1F);
 	qmgr_put_entry(queue, phys);
@@ -562,7 +537,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
 #endif
 
 		if (!skb) {
-			port->stat.rx_dropped++;
+			dev->stats.rx_dropped++;
 			/* put the desc back on RX-ready queue */
 			desc->buf_len = MAX_MRU;
 			desc->pkt_len = 0;
@@ -588,8 +563,8 @@ static int eth_poll(struct napi_struct *napi, int budget)
 		debug_pkt(dev, "eth_poll", skb->data, skb->len);
 
 		skb->protocol = eth_type_trans(skb, dev);
-		port->stat.rx_packets++;
-		port->stat.rx_bytes += skb->len;
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb->len;
 		netif_receive_skb(skb);
 
 		/* put the new buffer on RX-free queue */
@@ -617,7 +592,7 @@ static void eth_txdone_irq(void *unused)
 #if DEBUG_TX
 	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
 #endif
-	while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) {
+	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
 		u32 npe_id, n_desc;
 		struct port *port;
 		struct desc *desc;
@@ -634,8 +609,8 @@ static void eth_txdone_irq(void *unused)
 		debug_desc(phys, desc);
 
 		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
-			port->stat.tx_packets++;
-			port->stat.tx_bytes += desc->pkt_len;
+			port->netdev->stats.tx_packets++;
+			port->netdev->stats.tx_bytes += desc->pkt_len;
 
 			dma_unmap_tx(port, desc);
 #if DEBUG_TX
@@ -673,7 +648,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (unlikely(skb->len > MAX_MRU)) {
 		dev_kfree_skb(skb);
-		port->stat.tx_errors++;
+		dev->stats.tx_errors++;
 		return NETDEV_TX_OK;
 	}
 
@@ -689,7 +664,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	bytes = ALIGN(offset + len, 4);
 	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
 		dev_kfree_skb(skb);
-		port->stat.tx_dropped++;
+		dev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
 	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
@@ -703,7 +678,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
 #else
 		kfree(mem);
 #endif
-		port->stat.tx_dropped++;
+		dev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
 
@@ -746,12 +721,6 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-static struct net_device_stats *eth_stats(struct net_device *dev)
-{
-	struct port *port = netdev_priv(dev);
-	return &port->stat;
-}
-
 static void eth_set_mcast_list(struct net_device *dev)
 {
 	struct port *port = netdev_priv(dev);
@@ -785,41 +754,80 @@ static void eth_set_mcast_list(struct net_device *dev)
 static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
 	struct port *port = netdev_priv(dev);
-	unsigned int duplex_chg;
-	int err;
 
 	if (!netif_running(dev))
 		return -EINVAL;
-	err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg);
-	if (duplex_chg)
-		eth_set_duplex(port);
-	return err;
+	return phy_mii_ioctl(port->phydev, if_mii(req), cmd);
+}
+
+/* ethtool support */
+
+static void ixp4xx_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
+{
+	struct port *port = netdev_priv(dev);
+	strcpy(info->driver, DRV_NAME);
+	snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
+		 port->firmware[0], port->firmware[1],
+		 port->firmware[2], port->firmware[3]);
+	strcpy(info->bus_info, "internal");
 }
 
+static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct port *port = netdev_priv(dev);
+	return phy_ethtool_gset(port->phydev, cmd);
+}
+
+static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct port *port = netdev_priv(dev);
+	return phy_ethtool_sset(port->phydev, cmd);
+}
+
+static int ixp4xx_nway_reset(struct net_device *dev)
+{
+	struct port *port = netdev_priv(dev);
+	return phy_start_aneg(port->phydev);
+}
+
+static struct ethtool_ops ixp4xx_ethtool_ops = {
+	.get_drvinfo = ixp4xx_get_drvinfo,
+	.get_settings = ixp4xx_get_settings,
+	.set_settings = ixp4xx_set_settings,
+	.nway_reset = ixp4xx_nway_reset,
+	.get_link = ethtool_op_get_link,
+};
+
 
 static int request_queues(struct port *port)
 {
 	int err;
 
-	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0);
+	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
+				 "%s:RX-free", port->netdev->name);
 	if (err)
 		return err;
 
-	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0);
+	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
+				 "%s:RX", port->netdev->name);
 	if (err)
 		goto rel_rxfree;
 
-	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0);
+	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
 	if (err)
 		goto rel_rx;
 
-	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0);
+	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
+				 "%s:TX-ready", port->netdev->name);
 	if (err)
 		goto rel_tx;
 
 	/* TX-done queue handles skbs sent out by the NPEs */
 	if (!ports_open) {
-		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0);
+		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
+					 "%s:TX-done", DRV_NAME);
 		if (err)
 			goto rel_txready;
 	}
@@ -943,10 +951,12 @@ static int eth_open(struct net_device *dev)
 			       npe_name(npe));
 			return -EIO;
 		}
+		port->firmware[0] = msg.byte4;
+		port->firmware[1] = msg.byte5;
+		port->firmware[2] = msg.byte6;
+		port->firmware[3] = msg.byte7;
 	}
 
-	mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);
-
 	memset(&msg, 0, sizeof(msg));
 	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
 	msg.eth_id = port->id;
@@ -984,6 +994,9 @@ static int eth_open(struct net_device *dev)
 		return err;
 	}
 
+	port->speed = 0;	/* force "link up" message */
+	phy_start(port->phydev);
+
 	for (i = 0; i < ETH_ALEN; i++)
 		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
 	__raw_writel(0x08, &port->regs->random_seed);
@@ -1011,10 +1024,8 @@ static int eth_open(struct net_device *dev)
 	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
 
 	napi_enable(&port->napi);
-	phy_check_media(port, 1);
 	eth_set_mcast_list(dev);
 	netif_start_queue(dev);
-	schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
 
 	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
 		     eth_rx_irq, dev);
@@ -1105,25 +1116,31 @@ static int eth_close(struct net_device *dev)
 		printk(KERN_CRIT "%s: unable to disable loopback\n",
 		       dev->name);
 
-	port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) &
-		~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
-	mdio_write(dev, port->plat->phy, MII_BMCR,
-		   port->mii_bmcr | BMCR_PDOWN);
+	phy_stop(port->phydev);
 
 	if (!ports_open)
 		qmgr_disable_irq(TXDONE_QUEUE);
-	cancel_rearming_delayed_work(&port->mdio_thread);
 	destroy_queues(port);
 	release_queues(port);
 	return 0;
 }
 
+static const struct net_device_ops ixp4xx_netdev_ops = {
+	.ndo_open = eth_open,
+	.ndo_stop = eth_close,
+	.ndo_start_xmit = eth_xmit,
+	.ndo_set_multicast_list = eth_set_mcast_list,
+	.ndo_do_ioctl = eth_ioctl,
+
+};
+
 static int __devinit eth_init_one(struct platform_device *pdev)
 {
 	struct port *port;
 	struct net_device *dev;
 	struct eth_plat_info *plat = pdev->dev.platform_data;
 	u32 regs_phys;
+	char phy_id[BUS_ID_SIZE];
 	int err;
 
 	if (!(dev = alloc_etherdev(sizeof(struct port))))
@@ -1152,12 +1169,8 @@ static int __devinit eth_init_one(struct platform_device *pdev)
 		goto err_free;
 	}
 
-	dev->open = eth_open;
-	dev->hard_start_xmit = eth_xmit;
-	dev->stop = eth_close;
-	dev->get_stats = eth_stats;
-	dev->do_ioctl = eth_ioctl;
-	dev->set_multicast_list = eth_set_mcast_list;
+	dev->netdev_ops = &ixp4xx_netdev_ops;
+	dev->ethtool_ops = &ixp4xx_ethtool_ops;
 	dev->tx_queue_len = 100;
 
 	netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
@@ -1190,22 +1203,19 @@ static int __devinit eth_init_one(struct platform_device *pdev)
 	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
 	udelay(50);
 
-	port->mii.dev = dev;
-	port->mii.mdio_read = mdio_read;
-	port->mii.mdio_write = mdio_write;
-	port->mii.phy_id = plat->phy;
-	port->mii.phy_id_mask = 0x1F;
-	port->mii.reg_num_mask = 0x1F;
+	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy);
+	port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
+				   PHY_INTERFACE_MODE_MII);
+	if (IS_ERR(port->phydev)) {
+		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+		return PTR_ERR(port->phydev);
+	}
+
+	port->phydev->irq = PHY_POLL;
 
 	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
 	       npe_name(port->npe));
 
-	phy_reset(dev, plat->phy);
-	port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
-		~(BMCR_RESET | BMCR_PDOWN);
-	mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);
-
-	INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
 	return 0;
 
 err_unreg:
@@ -1231,7 +1241,7 @@ static int __devexit eth_remove_one(struct platform_device *pdev)
 	return 0;
 }
 
-static struct platform_driver drv = {
+static struct platform_driver ixp4xx_eth_driver = {
 	.driver.name	= DRV_NAME,
 	.probe		= eth_init_one,
 	.remove		= eth_remove_one,
@@ -1239,20 +1249,19 @@ static struct platform_driver drv = {
 
 static int __init eth_init_module(void)
 {
+	int err;
 	if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
 		return -ENOSYS;
 
-	/* All MII PHY accesses use NPE-B Ethernet registers */
-	spin_lock_init(&mdio_lock);
-	mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
-	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
-
-	return platform_driver_register(&drv);
+	if ((err = ixp4xx_mdio_register()))
+		return err;
+	return platform_driver_register(&ixp4xx_eth_driver);
 }
 
 static void __exit eth_cleanup_module(void)
 {
-	platform_driver_unregister(&drv);
+	platform_driver_unregister(&ixp4xx_eth_driver);
+	ixp4xx_mdio_remove();
 }
 
 MODULE_AUTHOR("Krzysztof Halasa");