aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethoc.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ethoc.c')
-rw-r--r--drivers/net/ethoc.c160
1 files changed, 112 insertions, 48 deletions
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index c5a2fe099a8..b79d7e1555d 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -19,6 +19,7 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/of.h>
22#include <net/ethoc.h> 23#include <net/ethoc.h>
23 24
24static int buffer_size = 0x8000; /* 32 KBytes */ 25static int buffer_size = 0x8000; /* 32 KBytes */
@@ -184,7 +185,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
184 * @netdev: pointer to network device structure 185 * @netdev: pointer to network device structure
185 * @napi: NAPI structure 186 * @napi: NAPI structure
186 * @msg_enable: device state flags 187 * @msg_enable: device state flags
187 * @rx_lock: receive lock
188 * @lock: device lock 188 * @lock: device lock
189 * @phy: attached PHY 189 * @phy: attached PHY
190 * @mdio: MDIO bus for PHY access 190 * @mdio: MDIO bus for PHY access
@@ -209,7 +209,6 @@ struct ethoc {
209 struct napi_struct napi; 209 struct napi_struct napi;
210 u32 msg_enable; 210 u32 msg_enable;
211 211
212 spinlock_t rx_lock;
213 spinlock_t lock; 212 spinlock_t lock;
214 213
215 struct phy_device *phy; 214 struct phy_device *phy;
@@ -413,10 +412,21 @@ static int ethoc_rx(struct net_device *dev, int limit)
413 unsigned int entry; 412 unsigned int entry;
414 struct ethoc_bd bd; 413 struct ethoc_bd bd;
415 414
416 entry = priv->num_tx + (priv->cur_rx % priv->num_rx); 415 entry = priv->num_tx + priv->cur_rx;
417 ethoc_read_bd(priv, entry, &bd); 416 ethoc_read_bd(priv, entry, &bd);
418 if (bd.stat & RX_BD_EMPTY) 417 if (bd.stat & RX_BD_EMPTY) {
419 break; 418 ethoc_ack_irq(priv, INT_MASK_RX);
419 /* If packet (interrupt) came in between checking
420 * BD_EMPTY and clearing the interrupt source, then we
421 * risk missing the packet as the RX interrupt won't
422 * trigger right away when we reenable it; hence, check
423 * BD_EMPTY here again to make sure there isn't such a
424 * packet waiting for us...
425 */
426 ethoc_read_bd(priv, entry, &bd);
427 if (bd.stat & RX_BD_EMPTY)
428 break;
429 }
420 430
421 if (ethoc_update_rx_stats(priv, &bd) == 0) { 431 if (ethoc_update_rx_stats(priv, &bd) == 0) {
422 int size = bd.stat >> 16; 432 int size = bd.stat >> 16;
@@ -446,13 +456,14 @@ static int ethoc_rx(struct net_device *dev, int limit)
446 bd.stat &= ~RX_BD_STATS; 456 bd.stat &= ~RX_BD_STATS;
447 bd.stat |= RX_BD_EMPTY; 457 bd.stat |= RX_BD_EMPTY;
448 ethoc_write_bd(priv, entry, &bd); 458 ethoc_write_bd(priv, entry, &bd);
449 priv->cur_rx++; 459 if (++priv->cur_rx == priv->num_rx)
460 priv->cur_rx = 0;
450 } 461 }
451 462
452 return count; 463 return count;
453} 464}
454 465
455static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd) 466static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
456{ 467{
457 struct net_device *netdev = dev->netdev; 468 struct net_device *netdev = dev->netdev;
458 469
@@ -482,32 +493,44 @@ static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
482 netdev->stats.collisions += (bd->stat >> 4) & 0xf; 493 netdev->stats.collisions += (bd->stat >> 4) & 0xf;
483 netdev->stats.tx_bytes += bd->stat >> 16; 494 netdev->stats.tx_bytes += bd->stat >> 16;
484 netdev->stats.tx_packets++; 495 netdev->stats.tx_packets++;
485 return 0;
486} 496}
487 497
488static void ethoc_tx(struct net_device *dev) 498static int ethoc_tx(struct net_device *dev, int limit)
489{ 499{
490 struct ethoc *priv = netdev_priv(dev); 500 struct ethoc *priv = netdev_priv(dev);
501 int count;
502 struct ethoc_bd bd;
491 503
492 spin_lock(&priv->lock); 504 for (count = 0; count < limit; ++count) {
505 unsigned int entry;
493 506
494 while (priv->dty_tx != priv->cur_tx) { 507 entry = priv->dty_tx & (priv->num_tx-1);
495 unsigned int entry = priv->dty_tx % priv->num_tx;
496 struct ethoc_bd bd;
497 508
498 ethoc_read_bd(priv, entry, &bd); 509 ethoc_read_bd(priv, entry, &bd);
499 if (bd.stat & TX_BD_READY)
500 break;
501 510
502 entry = (++priv->dty_tx) % priv->num_tx; 511 if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
503 (void)ethoc_update_tx_stats(priv, &bd); 512 ethoc_ack_irq(priv, INT_MASK_TX);
513 /* If interrupt came in between reading in the BD
514 * and clearing the interrupt source, then we risk
515 * missing the event as the TX interrupt won't trigger
516 * right away when we reenable it; hence, check
517 * BD_READY here again to make sure there isn't such an
518 * event pending...
519 */
520 ethoc_read_bd(priv, entry, &bd);
521 if (bd.stat & TX_BD_READY ||
522 (priv->dty_tx == priv->cur_tx))
523 break;
524 }
525
526 ethoc_update_tx_stats(priv, &bd);
527 priv->dty_tx++;
504 } 528 }
505 529
506 if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2)) 530 if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
507 netif_wake_queue(dev); 531 netif_wake_queue(dev);
508 532
509 ethoc_ack_irq(priv, INT_MASK_TX); 533 return count;
510 spin_unlock(&priv->lock);
511} 534}
512 535
513static irqreturn_t ethoc_interrupt(int irq, void *dev_id) 536static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
@@ -515,32 +538,38 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
515 struct net_device *dev = dev_id; 538 struct net_device *dev = dev_id;
516 struct ethoc *priv = netdev_priv(dev); 539 struct ethoc *priv = netdev_priv(dev);
517 u32 pending; 540 u32 pending;
518 541 u32 mask;
519 ethoc_disable_irq(priv, INT_MASK_ALL); 542
543 /* Figure out what triggered the interrupt...
544 * The tricky bit here is that the interrupt source bits get
545 * set in INT_SOURCE for an event regardless of whether that
546 * event is masked or not. Thus, in order to figure out what
547 * triggered the interrupt, we need to remove the sources
548 * for all events that are currently masked. This behaviour
549 * is not particularly well documented but reasonable...
550 */
551 mask = ethoc_read(priv, INT_MASK);
520 pending = ethoc_read(priv, INT_SOURCE); 552 pending = ethoc_read(priv, INT_SOURCE);
553 pending &= mask;
554
521 if (unlikely(pending == 0)) { 555 if (unlikely(pending == 0)) {
522 ethoc_enable_irq(priv, INT_MASK_ALL);
523 return IRQ_NONE; 556 return IRQ_NONE;
524 } 557 }
525 558
526 ethoc_ack_irq(priv, pending); 559 ethoc_ack_irq(priv, pending);
527 560
561 /* We always handle the dropped packet interrupt */
528 if (pending & INT_MASK_BUSY) { 562 if (pending & INT_MASK_BUSY) {
529 dev_err(&dev->dev, "packet dropped\n"); 563 dev_err(&dev->dev, "packet dropped\n");
530 dev->stats.rx_dropped++; 564 dev->stats.rx_dropped++;
531 } 565 }
532 566
533 if (pending & INT_MASK_RX) { 567 /* Handle receive/transmit event by switching to polling */
534 if (napi_schedule_prep(&priv->napi)) 568 if (pending & (INT_MASK_TX | INT_MASK_RX)) {
535 __napi_schedule(&priv->napi); 569 ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
536 } else { 570 napi_schedule(&priv->napi);
537 ethoc_enable_irq(priv, INT_MASK_RX);
538 } 571 }
539 572
540 if (pending & INT_MASK_TX)
541 ethoc_tx(dev);
542
543 ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
544 return IRQ_HANDLED; 573 return IRQ_HANDLED;
545} 574}
546 575
@@ -566,26 +595,29 @@ static int ethoc_get_mac_address(struct net_device *dev, void *addr)
566static int ethoc_poll(struct napi_struct *napi, int budget) 595static int ethoc_poll(struct napi_struct *napi, int budget)
567{ 596{
568 struct ethoc *priv = container_of(napi, struct ethoc, napi); 597 struct ethoc *priv = container_of(napi, struct ethoc, napi);
569 int work_done = 0; 598 int rx_work_done = 0;
599 int tx_work_done = 0;
600
601 rx_work_done = ethoc_rx(priv->netdev, budget);
602 tx_work_done = ethoc_tx(priv->netdev, budget);
570 603
571 work_done = ethoc_rx(priv->netdev, budget); 604 if (rx_work_done < budget && tx_work_done < budget) {
572 if (work_done < budget) {
573 ethoc_enable_irq(priv, INT_MASK_RX);
574 napi_complete(napi); 605 napi_complete(napi);
606 ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
575 } 607 }
576 608
577 return work_done; 609 return rx_work_done;
578} 610}
579 611
580static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg) 612static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
581{ 613{
582 unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
583 struct ethoc *priv = bus->priv; 614 struct ethoc *priv = bus->priv;
615 int i;
584 616
585 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); 617 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
586 ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ); 618 ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
587 619
588 while (time_before(jiffies, timeout)) { 620 for (i=0; i < 5; i++) {
589 u32 status = ethoc_read(priv, MIISTATUS); 621 u32 status = ethoc_read(priv, MIISTATUS);
590 if (!(status & MIISTATUS_BUSY)) { 622 if (!(status & MIISTATUS_BUSY)) {
591 u32 data = ethoc_read(priv, MIIRX_DATA); 623 u32 data = ethoc_read(priv, MIIRX_DATA);
@@ -593,8 +625,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
593 ethoc_write(priv, MIICOMMAND, 0); 625 ethoc_write(priv, MIICOMMAND, 0);
594 return data; 626 return data;
595 } 627 }
596 628 usleep_range(100,200);
597 schedule();
598 } 629 }
599 630
600 return -EBUSY; 631 return -EBUSY;
@@ -602,22 +633,21 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
602 633
603static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) 634static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
604{ 635{
605 unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
606 struct ethoc *priv = bus->priv; 636 struct ethoc *priv = bus->priv;
637 int i;
607 638
608 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); 639 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
609 ethoc_write(priv, MIITX_DATA, val); 640 ethoc_write(priv, MIITX_DATA, val);
610 ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE); 641 ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
611 642
612 while (time_before(jiffies, timeout)) { 643 for (i=0; i < 5; i++) {
613 u32 stat = ethoc_read(priv, MIISTATUS); 644 u32 stat = ethoc_read(priv, MIISTATUS);
614 if (!(stat & MIISTATUS_BUSY)) { 645 if (!(stat & MIISTATUS_BUSY)) {
615 /* reset MII command register */ 646 /* reset MII command register */
616 ethoc_write(priv, MIICOMMAND, 0); 647 ethoc_write(priv, MIICOMMAND, 0);
617 return 0; 648 return 0;
618 } 649 }
619 650 usleep_range(100,200);
620 schedule();
621 } 651 }
622 652
623 return -EBUSY; 653 return -EBUSY;
@@ -971,9 +1001,17 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
971 /* calculate the number of TX/RX buffers, maximum 128 supported */ 1001 /* calculate the number of TX/RX buffers, maximum 128 supported */
972 num_bd = min_t(unsigned int, 1002 num_bd = min_t(unsigned int,
973 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ); 1003 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
974 priv->num_tx = max(2, num_bd / 4); 1004 if (num_bd < 4) {
1005 ret = -ENODEV;
1006 goto error;
1007 }
1008 /* num_tx must be a power of two */
1009 priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
975 priv->num_rx = num_bd - priv->num_tx; 1010 priv->num_rx = num_bd - priv->num_tx;
976 1011
1012 dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
1013 priv->num_tx, priv->num_rx);
1014
977 priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL); 1015 priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
978 if (!priv->vma) { 1016 if (!priv->vma) {
979 ret = -ENOMEM; 1017 ret = -ENOMEM;
@@ -982,10 +1020,23 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
982 1020
983 /* Allow the platform setup code to pass in a MAC address. */ 1021 /* Allow the platform setup code to pass in a MAC address. */
984 if (pdev->dev.platform_data) { 1022 if (pdev->dev.platform_data) {
985 struct ethoc_platform_data *pdata = 1023 struct ethoc_platform_data *pdata = pdev->dev.platform_data;
986 (struct ethoc_platform_data *)pdev->dev.platform_data;
987 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); 1024 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
988 priv->phy_id = pdata->phy_id; 1025 priv->phy_id = pdata->phy_id;
1026 } else {
1027 priv->phy_id = -1;
1028
1029#ifdef CONFIG_OF
1030 {
1031 const uint8_t* mac;
1032
1033 mac = of_get_property(pdev->dev.of_node,
1034 "local-mac-address",
1035 NULL);
1036 if (mac)
1037 memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
1038 }
1039#endif
989 } 1040 }
990 1041
991 /* Check that the given MAC address is valid. If it isn't, read the 1042 /* Check that the given MAC address is valid. If it isn't, read the
@@ -1046,7 +1097,6 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
1046 /* setup NAPI */ 1097 /* setup NAPI */
1047 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); 1098 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
1048 1099
1049 spin_lock_init(&priv->rx_lock);
1050 spin_lock_init(&priv->lock); 1100 spin_lock_init(&priv->lock);
1051 1101
1052 ret = register_netdev(netdev); 1102 ret = register_netdev(netdev);
@@ -1113,6 +1163,16 @@ static int ethoc_resume(struct platform_device *pdev)
1113# define ethoc_resume NULL 1163# define ethoc_resume NULL
1114#endif 1164#endif
1115 1165
1166#ifdef CONFIG_OF
1167static struct of_device_id ethoc_match[] = {
1168 {
1169 .compatible = "opencores,ethoc",
1170 },
1171 {},
1172};
1173MODULE_DEVICE_TABLE(of, ethoc_match);
1174#endif
1175
1116static struct platform_driver ethoc_driver = { 1176static struct platform_driver ethoc_driver = {
1117 .probe = ethoc_probe, 1177 .probe = ethoc_probe,
1118 .remove = __devexit_p(ethoc_remove), 1178 .remove = __devexit_p(ethoc_remove),
@@ -1120,6 +1180,10 @@ static struct platform_driver ethoc_driver = {
1120 .resume = ethoc_resume, 1180 .resume = ethoc_resume,
1121 .driver = { 1181 .driver = {
1122 .name = "ethoc", 1182 .name = "ethoc",
1183 .owner = THIS_MODULE,
1184#ifdef CONFIG_OF
1185 .of_match_table = ethoc_match,
1186#endif
1123 }, 1187 },
1124}; 1188};
1125 1189