Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/greth.c  159
1 file changed, 92 insertions(+), 67 deletions(-)
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index b888abe6cd21..fdb0333f5cb6 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1,7 +1,7 @@
 /*
  * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
  *
- * 2005-2009 (c) Aeroflex Gaisler AB
+ * 2005-2010 (c) Aeroflex Gaisler AB
  *
  * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
  * available in the GRLIB VHDL IP core library.
@@ -392,12 +392,20 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
 	int err = NETDEV_TX_OK;
-	u32 status, dma_addr;
+	u32 status, dma_addr, ctrl;
+	unsigned long flags;
 
-	bdp = greth->tx_bd_base + greth->tx_next;
+	/* Clean TX Ring */
+	greth_clean_tx(greth->netdev);
 
 	if (unlikely(greth->tx_free <= 0)) {
+		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		/* Enable TX IRQ only if not already in poll() routine */
+		if (ctrl & GRETH_RXI)
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
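
The busy path above arms the TX-completion interrupt only when the ring is actually full, and only if poll() is not already running: poll() masks GRETH_RXI while it owns the rings, so a set RXI bit means poll() is idle and an IRQ is needed to restart the queue. A minimal sketch of that pattern, with hypothetical example_* names and bit positions standing in for the GRETH register accessors:

#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#define CTRL_RXI BIT(2)			/* hypothetical bit layout */
#define CTRL_TXI BIT(3)

struct example_priv {			/* stands in for struct greth_private */
	spinlock_t devlock;
	void __iomem *ctrl_reg;
	void __iomem *status_reg;
	struct napi_struct napi;
};

static netdev_tx_t example_xmit_full_ring(struct example_priv *priv,
					  struct net_device *dev)
{
	unsigned long flags;
	u32 ctrl;

	spin_lock_irqsave(&priv->devlock, flags);
	ctrl = readl(priv->ctrl_reg);
	/* RXI still enabled => poll() is not active, safe to arm TX IRQ */
	if (ctrl & CTRL_RXI)
		writel(ctrl | CTRL_TXI, priv->ctrl_reg);
	netif_stop_queue(dev);
	spin_unlock_irqrestore(&priv->devlock, flags);

	return NETDEV_TX_BUSY;
}

The snippets after the later hunks reuse this example_priv.
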
@@ -410,13 +418,14 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto out;
 	}
 
+	bdp = greth->tx_bd_base + greth->tx_next;
 	dma_addr = greth_read_bd(&bdp->addr);
 
 	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
 
 	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
 
-	status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);
+	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
 
 	/* Wrap around descriptor ring */
 	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -426,22 +435,11 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	greth->tx_next = NEXT_TX(greth->tx_next);
 	greth->tx_free--;
 
-	/* No more descriptors */
-	if (unlikely(greth->tx_free == 0)) {
-
-		/* Free transmitted descriptors */
-		greth_clean_tx(dev);
-
-		/* If nothing was cleaned, stop queue & wait for irq */
-		if (unlikely(greth->tx_free == 0)) {
-			status |= GRETH_BD_IE;
-			netif_stop_queue(dev);
-		}
-	}
-
 	/* Write descriptor control word and enable transmission */
 	greth_write_bd(&bdp->stat, status);
+	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 	greth_enable_tx(greth);
+	spin_unlock_irqrestore(&greth->devlock, flags);
 
 out:
 	dev_kfree_skb(skb);
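
greth_enable_tx() performs a read-modify-write of the shared control register, which the interrupt handler and poll() also update, so the xmit paths now take devlock around the kick. A sketch under the same assumptions as above (the descriptor write is shown as a plain 32-bit store; greth.c goes through greth_write_bd()):

#define CTRL_TX_EN BIT(0)		/* hypothetical TX-enable bit */

static void example_kick_tx(struct example_priv *priv, u32 bd_status,
			    u32 *bd_stat)
{
	unsigned long flags;

	*bd_stat = bd_status;		/* hand the descriptor to hardware */
	wmb();				/* descriptor visible before enable */

	/* an unserialized RMW here could lose bits set by poll()/IRQ */
	spin_lock_irqsave(&priv->devlock, flags);
	writel(readl(priv->ctrl_reg) | CTRL_TX_EN, priv->ctrl_reg);
	spin_unlock_irqrestore(&priv->devlock, flags);
}
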
@@ -454,13 +452,23 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
-	u32 status = 0, dma_addr;
+	u32 status = 0, dma_addr, ctrl;
 	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+	unsigned long flags;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
+	/* Clean TX Ring */
+	greth_clean_tx_gbit(dev);
+
 	if (greth->tx_free < nr_frags + 1) {
+		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		/* Enable TX IRQ only if not already in poll() routine */
+		if (ctrl & GRETH_RXI)
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		err = NETDEV_TX_BUSY;
 		goto out;
 	}
@@ -513,14 +521,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 		/* More fragments left */
 		if (i < nr_frags - 1)
 			status |= GRETH_TXBD_MORE;
-
-		/* ... last fragment, check if out of descriptors */
-		else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
-
-			/* Enable interrupts and stop queue */
-			status |= GRETH_BD_IE;
-			netif_stop_queue(dev);
-		}
+		else
+			status |= GRETH_BD_IE; /* enable IRQ on last fragment */
 
 		greth_write_bd(&bdp->stat, status);
 
@@ -548,7 +550,9 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 
 	wmb();
 
+	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 	greth_enable_tx(greth);
+	spin_unlock_irqrestore(&greth->devlock, flags);
 
 	return NETDEV_TX_OK;
 
@@ -570,12 +574,11 @@ out:
 	return err;
 }
 
-
 static irqreturn_t greth_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct greth_private *greth;
-	u32 status;
+	u32 status, ctrl;
 	irqreturn_t retval = IRQ_NONE;
 
 	greth = netdev_priv(dev);
@@ -585,14 +588,15 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id)
 	/* Get the interrupt events that caused us to be here. */
 	status = GRETH_REGLOAD(greth->regs->status);
 
-	/* Handle rx and tx interrupts through poll */
-	if (status & (GRETH_INT_RE | GRETH_INT_RX |
-		      GRETH_INT_TE | GRETH_INT_TX)) {
+	/* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
+	 * set regardless of whether IRQ is enabled or not. Especially
+	 * important when shared IRQ.
+	 */
+	ctrl = GRETH_REGLOAD(greth->regs->control);
 
-		/* Clear interrupt status */
-		GRETH_REGSAVE(greth->regs->status,
-			      status & (GRETH_INT_RE | GRETH_INT_RX |
-					GRETH_INT_TE | GRETH_INT_TX));
+	/* Handle rx and tx interrupts through poll */
+	if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
+	    ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
 		retval = IRQ_HANDLED;
 
 		/* Disable interrupts and schedule poll() */
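
On a shared line the GRETH status bits may be set even while the matching interrupt source is masked, so the handler has to AND pending status against the enable bits before claiming the IRQ, or it would swallow interrupts meant for another device on the same line. The rule in sketch form, with hypothetical status bits and the example_priv from earlier:

#define STAT_RX_EVENTS (BIT(0) | BIT(1))	/* hypothetical pending bits */
#define STAT_TX_EVENTS (BIT(2) | BIT(3))

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;
	u32 status = readl(priv->status_reg);
	u32 ctrl = readl(priv->ctrl_reg);

	/* claim the IRQ only if an event is both pending and enabled */
	if (!(((status & STAT_RX_EVENTS) && (ctrl & CTRL_RXI)) ||
	      ((status & STAT_TX_EVENTS) && (ctrl & CTRL_TXI))))
		return IRQ_NONE;	/* probably the other device's IRQ */

	/* mask both sources and defer the real work to NAPI poll() */
	writel(ctrl & ~(CTRL_RXI | CTRL_TXI), priv->ctrl_reg);
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}
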
@@ -616,6 +620,8 @@ static void greth_clean_tx(struct net_device *dev)
 
 	while (1) {
 		bdp = greth->tx_bd_base + greth->tx_last;
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+		mb();
 		stat = greth_read_bd(&bdp->stat);
 
 		if (unlikely(stat & GRETH_BD_EN))
@@ -676,7 +682,10 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 
 		/* We only clean fully completed SKBs */
 		bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
-		stat = bdp_last_frag->stat;
+
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+		mb();
+		stat = greth_read_bd(&bdp_last_frag->stat);
 
 		if (stat & GRETH_BD_EN)
 			break;
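
Both cleanup loops now ack the pending TX interrupt before sampling the descriptor's enable bit, with mb() ordering the two accesses. If the descriptor is still owned by hardware after the ack, the eventual completion raises the just-cleared interrupt again, so a completion can no longer slip between "read descriptor" and "ack IRQ" and be lost. (The gbit variant also switches from a raw bdp_last_frag->stat load to greth_read_bd(), which handles descriptor endianness.) A condensed sketch of the ordering, with a hypothetical BD_EN bit:

#define BD_EN BIT(11)	/* hypothetical "owned by hardware" bit */

static bool example_tx_bd_done(struct example_priv *priv, u32 *bd_stat)
{
	/* ack first: a completion after this point re-raises the IRQ */
	writel(STAT_TX_EVENTS, priv->status_reg);
	mb();		/* ack must complete before the descriptor read */
	return !(READ_ONCE(*bd_stat) & BD_EN);
}
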
@@ -708,21 +717,9 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 		greth->tx_free += nr_frags+1;
 		dev_kfree_skb(skb);
 	}
-	if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
-		netif_wake_queue(dev);
-	}
-}
 
-static int greth_pending_packets(struct greth_private *greth)
-{
-	struct greth_bd *bdp;
-	u32 status;
-	bdp = greth->rx_bd_base + greth->rx_cur;
-	status = greth_read_bd(&bdp->stat);
-	if (status & GRETH_BD_EN)
-		return 0;
-	else
-		return 1;
+	if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
+		netif_wake_queue(dev);
 }
 
 static int greth_rx(struct net_device *dev, int limit)
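
With ring cleanup now running on every hard_start_xmit call, the wake-up is guarded by netif_queue_stopped() so an already-running queue is not re-woken on every transmit. In sketch form, taking the free-descriptor count as a parameter:

static void example_maybe_wake(struct net_device *dev, int tx_free)
{
	/* wake only a stopped queue, and only with worst-case skb room */
	if (netif_queue_stopped(dev) && tx_free > MAX_SKB_FRAGS + 1)
		netif_wake_queue(dev);
}
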
@@ -733,20 +730,24 @@ static int greth_rx(struct net_device *dev, int limit)
 	int pkt_len;
 	int bad, count;
 	u32 status, dma_addr;
+	unsigned long flags;
 
 	greth = netdev_priv(dev);
 
 	for (count = 0; count < limit; ++count) {
 
 		bdp = greth->rx_bd_base + greth->rx_cur;
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+		mb();
 		status = greth_read_bd(&bdp->stat);
-		dma_addr = greth_read_bd(&bdp->addr);
-		bad = 0;
 
 		if (unlikely(status & GRETH_BD_EN)) {
 			break;
 		}
 
+		dma_addr = greth_read_bd(&bdp->addr);
+		bad = 0;
+
 		/* Check status for errors. */
 		if (unlikely(status & GRETH_RXBD_STATUS)) {
 			if (status & GRETH_RXBD_ERR_FT) {
@@ -808,7 +809,9 @@ static int greth_rx(struct net_device *dev, int limit)
 
 		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
 
+		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
 		greth_enable_rx(greth);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 
 		greth->rx_cur = NEXT_RX(greth->rx_cur);
 	}
@@ -842,6 +845,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
 	int pkt_len;
 	int bad, count = 0;
 	u32 status, dma_addr;
+	unsigned long flags;
 
 	greth = netdev_priv(dev);
 
@@ -849,6 +853,8 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
 
 		bdp = greth->rx_bd_base + greth->rx_cur;
 		skb = greth->rx_skbuff[greth->rx_cur];
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+		mb();
 		status = greth_read_bd(&bdp->stat);
 		bad = 0;
 
@@ -936,7 +942,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
 
 		wmb();
 		greth_write_bd(&bdp->stat, status);
+		spin_lock_irqsave(&greth->devlock, flags);
 		greth_enable_rx(greth);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		greth->rx_cur = NEXT_RX(greth->rx_cur);
 	}
 
@@ -948,15 +956,18 @@ static int greth_poll(struct napi_struct *napi, int budget)
 {
 	struct greth_private *greth;
 	int work_done = 0;
+	unsigned long flags;
+	u32 mask, ctrl;
 	greth = container_of(napi, struct greth_private, napi);
 
-	if (greth->gbit_mac) {
-		greth_clean_tx_gbit(greth->netdev);
-	} else {
-		greth_clean_tx(greth->netdev);
+restart_txrx_poll:
+	if (netif_queue_stopped(greth->netdev)) {
+		if (greth->gbit_mac)
+			greth_clean_tx_gbit(greth->netdev);
+		else
+			greth_clean_tx(greth->netdev);
 	}
 
-restart_poll:
 	if (greth->gbit_mac) {
 		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
 	} else {
@@ -965,15 +976,29 @@ restart_poll:
 
 	if (work_done < budget) {
 
-		napi_complete(napi);
+		spin_lock_irqsave(&greth->devlock, flags);
 
-		if (greth_pending_packets(greth)) {
-			napi_reschedule(napi);
-			goto restart_poll;
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		if (netif_queue_stopped(greth->netdev)) {
+			GRETH_REGSAVE(greth->regs->control,
+				      ctrl | GRETH_TXI | GRETH_RXI);
+			mask = GRETH_INT_RX | GRETH_INT_RE |
+			       GRETH_INT_TX | GRETH_INT_TE;
+		} else {
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
+			mask = GRETH_INT_RX | GRETH_INT_RE;
+		}
+
+		if (GRETH_REGLOAD(greth->regs->status) & mask) {
+			GRETH_REGSAVE(greth->regs->control, ctrl);
+			spin_unlock_irqrestore(&greth->devlock, flags);
+			goto restart_txrx_poll;
+		} else {
+			__napi_complete(napi);
+			spin_unlock_irqrestore(&greth->devlock, flags);
 		}
 	}
 
-	greth_enable_irqs(greth);
 	return work_done;
 }
 
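
The rewritten poll() closes the classic NAPI race window: it re-enables the interrupt sources it has finished with, re-reads the status register under devlock, and only then calls __napi_complete(); if anything arrived in between, the enable is rolled back and polling restarts. An RX-only sketch of that dance, again with the hypothetical example_priv and an assumed example_rx() budget-consumer (__napi_complete() is the lock-held variant drivers of this vintage use once the race check is done):

static int example_rx(struct example_priv *priv, int budget);	/* hypothetical */

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv =
		container_of(napi, struct example_priv, napi);
	unsigned long flags;
	int work_done = 0;
	u32 ctrl;

restart:
	work_done += example_rx(priv, budget - work_done);

	if (work_done < budget) {
		spin_lock_irqsave(&priv->devlock, flags);

		ctrl = readl(priv->ctrl_reg);
		writel(ctrl | CTRL_RXI, priv->ctrl_reg);	/* re-arm IRQ */

		if (readl(priv->status_reg) & STAT_RX_EVENTS) {
			/* lost the race: mask again and keep polling */
			writel(ctrl, priv->ctrl_reg);
			spin_unlock_irqrestore(&priv->devlock, flags);
			goto restart;
		}
		__napi_complete(napi);
		spin_unlock_irqrestore(&priv->devlock, flags);
	}
	return work_done;
}
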
@@ -1168,11 +1193,11 @@ static const struct ethtool_ops greth_ethtool_ops = {
 };
 
 static struct net_device_ops greth_netdev_ops = {
-	.ndo_open = greth_open,
-	.ndo_stop = greth_close,
-	.ndo_start_xmit = greth_start_xmit,
-	.ndo_set_mac_address = greth_set_mac_add,
-	.ndo_validate_addr = eth_validate_addr,
+	.ndo_open		= greth_open,
+	.ndo_stop		= greth_close,
+	.ndo_start_xmit		= greth_start_xmit,
+	.ndo_set_mac_address	= greth_set_mac_add,
+	.ndo_validate_addr	= eth_validate_addr,
 };
 
 static inline int wait_for_mdio(struct greth_private *greth)