author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/net/ethoc.c
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/net/ethoc.c')
-rw-r--r--	drivers/net/ethoc.c	160
1 file changed, 106 insertions, 54 deletions
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 6d653c459c1f..a83dd312c3ac 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/of.h>
 #include <net/ethoc.h>
 
 static int buffer_size = 0x8000; /* 32 KBytes */
@@ -184,7 +185,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
  * @netdev: pointer to network device structure
  * @napi: NAPI structure
  * @msg_enable: device state flags
- * @rx_lock: receive lock
  * @lock: device lock
  * @phy: attached PHY
  * @mdio: MDIO bus for PHY access
@@ -209,7 +209,6 @@ struct ethoc {
 	struct napi_struct napi;
 	u32 msg_enable;
 
-	spinlock_t rx_lock;
 	spinlock_t lock;
 
 	struct phy_device *phy;
@@ -413,10 +412,21 @@ static int ethoc_rx(struct net_device *dev, int limit)
 		unsigned int entry;
 		struct ethoc_bd bd;
 
-		entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
+		entry = priv->num_tx + priv->cur_rx;
 		ethoc_read_bd(priv, entry, &bd);
-		if (bd.stat & RX_BD_EMPTY)
-			break;
+		if (bd.stat & RX_BD_EMPTY) {
+			ethoc_ack_irq(priv, INT_MASK_RX);
+			/* If packet (interrupt) came in between checking
+			 * BD_EMPTY and clearing the interrupt source, then we
+			 * risk missing the packet as the RX interrupt won't
+			 * trigger right away when we reenable it; hence, check
+			 * BD_EMPTY here again to make sure there isn't such a
+			 * packet waiting for us...
+			 */
+			ethoc_read_bd(priv, entry, &bd);
+			if (bd.stat & RX_BD_EMPTY)
+				break;
+		}
 
 		if (ethoc_update_rx_stats(priv, &bd) == 0) {
 			int size = bd.stat >> 16;
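The hunk above closes an edge-triggered race: a packet arriving between the emptiness check and the interrupt ack sets INT_SOURCE just before the ack clears it, and with the source bit gone the RX interrupt never re-fires. Acking first and then re-checking the descriptor closes that window. A minimal, self-contained model of the idiom (plain variables stand in for `bd.stat & RX_BD_EMPTY` and the INT_SOURCE bit; none of this is the driver's real API):

    #include <stdbool.h>
    #include <stdio.h>

    static volatile bool bd_empty = false;   /* one packet pending on entry */
    static volatile bool irq_source = true;  /* latched RX event */

    static void ack_irq(void) { irq_source = false; }

    static int rx_poll(int limit)
    {
        int count;

        for (count = 0; count < limit; count++) {
            if (bd_empty) {
                ack_irq();     /* clear a possibly stale source bit */
                if (bd_empty)  /* re-check: a packet may have landed between
                                * the check and the ack; its interrupt edge
                                * would otherwise be lost */
                    break;
            }
            /* ... hand the packet to the stack, return the BD ... */
            bd_empty = true;
        }
        return count;
    }

    int main(void)
    {
        printf("reaped %d packet(s)\n", rx_poll(16));
        return 0;
    }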
@@ -446,13 +456,14 @@ static int ethoc_rx(struct net_device *dev, int limit)
 			bd.stat &= ~RX_BD_STATS;
 			bd.stat |= RX_BD_EMPTY;
 			ethoc_write_bd(priv, entry, &bd);
-			priv->cur_rx++;
+			if (++priv->cur_rx == priv->num_rx)
+				priv->cur_rx = 0;
 		}
 
 	return count;
 }
 
-static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
+static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
 {
 	struct net_device *netdev = dev->netdev;
 
@@ -482,32 +493,44 @@ static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
 	netdev->stats.collisions += (bd->stat >> 4) & 0xf;
 	netdev->stats.tx_bytes += bd->stat >> 16;
 	netdev->stats.tx_packets++;
-	return 0;
 }
 
-static void ethoc_tx(struct net_device *dev)
+static int ethoc_tx(struct net_device *dev, int limit)
 {
 	struct ethoc *priv = netdev_priv(dev);
+	int count;
+	struct ethoc_bd bd;
 
-	spin_lock(&priv->lock);
+	for (count = 0; count < limit; ++count) {
+		unsigned int entry;
 
-	while (priv->dty_tx != priv->cur_tx) {
-		unsigned int entry = priv->dty_tx % priv->num_tx;
-		struct ethoc_bd bd;
+		entry = priv->dty_tx & (priv->num_tx-1);
 
 		ethoc_read_bd(priv, entry, &bd);
-		if (bd.stat & TX_BD_READY)
-			break;
 
-		entry = (++priv->dty_tx) % priv->num_tx;
-		(void)ethoc_update_tx_stats(priv, &bd);
+		if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
+			ethoc_ack_irq(priv, INT_MASK_TX);
+			/* If interrupt came in between reading in the BD
+			 * and clearing the interrupt source, then we risk
+			 * missing the event as the TX interrupt won't trigger
+			 * right away when we reenable it; hence, check
+			 * TX_BD_READY here again to make sure there isn't
+			 * such an event pending...
+			 */
+			ethoc_read_bd(priv, entry, &bd);
+			if (bd.stat & TX_BD_READY ||
+			    (priv->dty_tx == priv->cur_tx))
+				break;
+		}
+
+		ethoc_update_tx_stats(priv, &bd);
+		priv->dty_tx++;
 	}
 
 	if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
 		netif_wake_queue(dev);
 
-	ethoc_ack_irq(priv, INT_MASK_TX);
-	spin_unlock(&priv->lock);
+	return count;
 }
 
 static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
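Two things are easy to miss in the rework above: `dty_tx` and `cur_tx` become free-running counters that are reduced to a ring slot only at the point of use (`priv->dty_tx & (priv->num_tx-1)`), which is why the probe path later forces `num_tx` to a power of two, and the spinlock disappears because `ethoc_tx()` is now called only from the NAPI poll loop, which serializes it. A self-contained illustration of why free-running indices stay correct across wraparound (assumes a 32-bit `unsigned int`; the values are picked for the demonstration):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int num_tx = 8;           /* must be a power of two */
        unsigned int cur_tx = 0xfffffffeu; /* producer index, about to wrap */
        unsigned int dty_tx = 0xfffffffcu; /* consumer (reclaim) index */

        cur_tx += 3; /* wraps past 0xffffffff to 0x00000001 */

        /* Unsigned subtraction still yields the in-flight count... */
        assert(cur_tx - dty_tx == 5);
        /* ...and masking recovers the ring slot, as in ethoc_tx(). */
        printf("slot = %u, in flight = %u\n",
               dty_tx & (num_tx - 1), cur_tx - dty_tx);
        return 0;
    }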
@@ -515,32 +538,38 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
 	struct net_device *dev = dev_id;
 	struct ethoc *priv = netdev_priv(dev);
 	u32 pending;
+	u32 mask;
 
-	ethoc_disable_irq(priv, INT_MASK_ALL);
+	/* Figure out what triggered the interrupt...
+	 * The tricky bit here is that the interrupt source bits get
+	 * set in INT_SOURCE for an event regardless of whether that
+	 * event is masked or not. Thus, in order to figure out what
+	 * triggered the interrupt, we need to remove the sources
+	 * for all events that are currently masked. This behaviour
+	 * is not particularly well documented but reasonable...
+	 */
+	mask = ethoc_read(priv, INT_MASK);
 	pending = ethoc_read(priv, INT_SOURCE);
+	pending &= mask;
+
 	if (unlikely(pending == 0)) {
-		ethoc_enable_irq(priv, INT_MASK_ALL);
 		return IRQ_NONE;
 	}
 
 	ethoc_ack_irq(priv, pending);
 
+	/* We always handle the dropped packet interrupt */
 	if (pending & INT_MASK_BUSY) {
 		dev_err(&dev->dev, "packet dropped\n");
 		dev->stats.rx_dropped++;
 	}
 
-	if (pending & INT_MASK_RX) {
-		if (napi_schedule_prep(&priv->napi))
-			__napi_schedule(&priv->napi);
-	} else {
-		ethoc_enable_irq(priv, INT_MASK_RX);
-	}
+	/* Handle receive/transmit event by switching to polling */
+	if (pending & (INT_MASK_TX | INT_MASK_RX)) {
+		ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+		napi_schedule(&priv->napi);
+	}
 
-	if (pending & INT_MASK_TX)
-		ethoc_tx(dev);
-
-	ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
 	return IRQ_HANDLED;
 }
 
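The comment in this hunk carries the core of the new interrupt scheme: INT_SOURCE latches every event even while it is masked, so once TX/RX are masked and handed to the NAPI poller, a raw read of INT_SOURCE would keep reporting them and the handler could ack events it no longer owns. Filtering with INT_MASK leaves only the events this handler is responsible for. A small model of that filtering (the bit positions are illustrative, not the ethoc register layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit assignments only. */
    #define INT_TX   (1u << 0)
    #define INT_RX   (1u << 2)
    #define INT_BUSY (1u << 4)

    int main(void)
    {
        uint32_t source  = INT_TX | INT_RX;   /* both events latched */
        uint32_t mask    = INT_RX | INT_BUSY; /* TX masked: NAPI owns it */
        uint32_t pending = source & mask;

        /* Only RX is "ours"; the latched TX bit must be left for the
         * poller that masked it, so it is neither acked nor handled. */
        printf("pending = 0x%x\n", pending); /* prints 0x4: RX only */
        return 0;
    }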
@@ -566,26 +595,29 @@ static int ethoc_get_mac_address(struct net_device *dev, void *addr)
 static int ethoc_poll(struct napi_struct *napi, int budget)
 {
 	struct ethoc *priv = container_of(napi, struct ethoc, napi);
-	int work_done = 0;
+	int rx_work_done = 0;
+	int tx_work_done = 0;
+
+	rx_work_done = ethoc_rx(priv->netdev, budget);
+	tx_work_done = ethoc_tx(priv->netdev, budget);
 
-	work_done = ethoc_rx(priv->netdev, budget);
-	if (work_done < budget) {
-		ethoc_enable_irq(priv, INT_MASK_RX);
+	if (rx_work_done < budget && tx_work_done < budget) {
 		napi_complete(napi);
+		ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
 	}
 
-	return work_done;
+	return rx_work_done;
 }
 
 static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
 {
-	unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
 	struct ethoc *priv = bus->priv;
+	int i;
 
 	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
 	ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
 
-	while (time_before(jiffies, timeout)) {
+	for (i=0; i < 5; i++) {
 		u32 status = ethoc_read(priv, MIISTATUS);
 		if (!(status & MIISTATUS_BUSY)) {
 			u32 data = ethoc_read(priv, MIIRX_DATA);
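After this change `ethoc_poll()` reaps both directions under the same budget and re-arms interrupts only when both RX and TX completed less than a full budget of work; calling `napi_complete()` before unmasking keeps the re-arm consistent with the handler's mask-and-schedule side. A compilable sketch of that shape, with stubs standing in for `ethoc_rx()`/`ethoc_tx()` and the IRQ helpers:

    #include <stdio.h>

    static int do_rx(int budget) { return budget > 3 ? 3 : budget; }
    static int do_tx(int budget) { (void)budget; return 0; }
    static void napi_done(void) { puts("napi_complete"); }
    static void unmask_tx_rx(void) { puts("TX|RX interrupts re-enabled"); }

    static int poll_sketch(int budget)
    {
        int rx = do_rx(budget);
        int tx = do_tx(budget);

        /* Either direction hitting the budget means more work may be
         * pending, so stay in polling mode and skip the re-arm. */
        if (rx < budget && tx < budget) {
            napi_done();     /* complete first ... */
            unmask_tx_rx();  /* ... then re-enable, as in ethoc_poll() */
        }
        return rx; /* NAPI budget accounting is based on RX work */
    }

    int main(void)
    {
        printf("work done: %d\n", poll_sketch(64));
        return 0;
    }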
@@ -593,8 +625,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
 			ethoc_write(priv, MIICOMMAND, 0);
 			return data;
 		}
-
-		schedule();
+		usleep_range(100,200);
 	}
 
 	return -EBUSY;
@@ -602,22 +633,21 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
 
 static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
 {
-	unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
 	struct ethoc *priv = bus->priv;
+	int i;
 
 	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
 	ethoc_write(priv, MIITX_DATA, val);
 	ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
 
-	while (time_before(jiffies, timeout)) {
+	for (i=0; i < 5; i++) {
 		u32 stat = ethoc_read(priv, MIISTATUS);
 		if (!(stat & MIISTATUS_BUSY)) {
 			/* reset MII command register */
 			ethoc_write(priv, MIICOMMAND, 0);
 			return 0;
 		}
-
-		schedule();
+		usleep_range(100,200);
 	}
 
 	return -EBUSY;
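Both MDIO hunks replace an open-ended `jiffies`-based wait built around `schedule()` with a deterministic, bounded poll: at most five reads of MIISTATUS with a 100-200 us sleep between attempts, roughly a millisecond worst case before giving up with `-EBUSY`. A userspace rendition of the idiom (the fake `mii_busy()` reports busy for the first two polls):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static int attempts;
    static bool mii_busy(void) { return ++attempts < 3; }

    static int wait_mii_idle(void)
    {
        int i;

        for (i = 0; i < 5; i++) {
            if (!mii_busy())
                return 0;
            usleep(150); /* usleep_range(100, 200) in the kernel */
        }
        return -EBUSY; /* bounded: ~1 ms worst case, never longer */
    }

    int main(void)
    {
        printf("wait_mii_idle() = %d\n", wait_mii_idle());
        return 0;
    }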
@@ -806,11 +836,6 @@ static void ethoc_tx_timeout(struct net_device *dev)
 		ethoc_interrupt(dev->irq, dev);
 }
 
-static struct net_device_stats *ethoc_stats(struct net_device *dev)
-{
-	return &dev->stats;
-}
-
 static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ethoc *priv = netdev_priv(dev);
@@ -863,7 +888,6 @@ static const struct net_device_ops ethoc_netdev_ops = {
 	.ndo_set_multicast_list = ethoc_set_multicast_list,
 	.ndo_change_mtu = ethoc_change_mtu,
 	.ndo_tx_timeout = ethoc_tx_timeout,
-	.ndo_get_stats = ethoc_stats,
 	.ndo_start_xmit = ethoc_start_xmit,
 };
 
@@ -977,9 +1001,17 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
 	/* calculate the number of TX/RX buffers, maximum 128 supported */
 	num_bd = min_t(unsigned int,
 		128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
-	priv->num_tx = max(2, num_bd / 4);
+	if (num_bd < 4) {
+		ret = -ENODEV;
+		goto error;
+	}
+	/* num_tx must be a power of two */
+	priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
 	priv->num_rx = num_bd - priv->num_tx;
 
+	dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
+		priv->num_tx, priv->num_rx);
+
 	priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
 	if (!priv->vma) {
 		ret = -ENOMEM;
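The new sizing rejects memory windows too small for a sane split and rounds the TX share down to a power of two so the `& (num_tx - 1)` masking in `ethoc_tx()` is valid; RX simply gets whatever descriptors remain. A worked example across a few plausible descriptor counts (the helper is a userspace stand-in for the kernel's `rounddown_pow_of_two()`):

    #include <stdio.h>

    /* Userspace stand-in for the kernel helper of the same name. */
    static unsigned int rounddown_pow_of_two(unsigned int n)
    {
        unsigned int p = 1;

        while (p * 2 <= n)
            p *= 2;
        return p;
    }

    int main(void)
    {
        unsigned int sizes[] = { 4, 6, 12, 20, 128 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
            unsigned int num_bd = sizes[i];
            unsigned int num_tx = rounddown_pow_of_two(num_bd >> 1);

            printf("num_bd=%3u -> num_tx=%3u num_rx=%3u\n",
                   num_bd, num_tx, num_bd - num_tx);
        }
        return 0;
    }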
@@ -988,10 +1020,23 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
 
 	/* Allow the platform setup code to pass in a MAC address. */
 	if (pdev->dev.platform_data) {
-		struct ethoc_platform_data *pdata =
-			(struct ethoc_platform_data *)pdev->dev.platform_data;
+		struct ethoc_platform_data *pdata = pdev->dev.platform_data;
 		memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
 		priv->phy_id = pdata->phy_id;
+	} else {
+		priv->phy_id = -1;
+
+#ifdef CONFIG_OF
+		{
+		const uint8_t* mac;
+
+		mac = of_get_property(pdev->dev.of_node,
+				      "local-mac-address",
+				      NULL);
+		if (mac)
+			memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
+		}
+#endif
 	}
 
 	/* Check that the given MAC address is valid. If it isn't, read the
@@ -1052,7 +1097,6 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
 	/* setup NAPI */
 	netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
 
-	spin_lock_init(&priv->rx_lock);
 	spin_lock_init(&priv->lock);
 
 	ret = register_netdev(netdev);
@@ -1119,6 +1163,12 @@ static int ethoc_resume(struct platform_device *pdev)
 # define ethoc_resume NULL
 #endif
 
+static struct of_device_id ethoc_match[] = {
+	{ .compatible = "opencores,ethoc", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ethoc_match);
+
 static struct platform_driver ethoc_driver = {
 	.probe = ethoc_probe,
 	.remove = __devexit_p(ethoc_remove),
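The `ethoc_match` table (wired up via `.of_match_table` in the next hunk) lets the platform bus bind the driver to a device-tree node whose compatible string is "opencores,ethoc"; the `local-mac-address` lookup added to probe then reads the MAC from that same node. For reference, a hedged sketch of how a probe routine can test whether it was bound through such a table; ethoc itself does not need this check in the patch:

    #include <linux/of_device.h>
    #include <linux/platform_device.h>
    #include <linux/types.h>

    /* Illustrative only; not part of this commit. */
    static bool ethoc_bound_via_dt(struct platform_device *pdev)
    {
        return of_match_device(ethoc_match, &pdev->dev) != NULL;
    }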
@@ -1126,6 +1176,8 @@ static struct platform_driver ethoc_driver = {
 	.resume = ethoc_resume,
 	.driver = {
 		.name = "ethoc",
+		.owner = THIS_MODULE,
+		.of_match_table = ethoc_match,
 	},
 };
 