path: root/drivers/net/greth.c
Diffstat (limited to 'drivers/net/greth.c')
 -rw-r--r--  drivers/net/greth.c | 221
 1 file changed, 131 insertions(+), 90 deletions(-)
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 27d6960ce09e..fdb0333f5cb6 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1,7 +1,7 @@
 /*
  * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
  *
- * 2005-2009 (c) Aeroflex Gaisler AB
+ * 2005-2010 (c) Aeroflex Gaisler AB
  *
  * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
  * available in the GRLIB VHDL IP core library.
@@ -356,6 +356,8 @@ static int greth_open(struct net_device *dev)
         dev_dbg(&dev->dev, " starting queue\n");
         netif_start_queue(dev);
 
+        GRETH_REGSAVE(greth->regs->status, 0xFF);
+
         napi_enable(&greth->napi);
 
         greth_enable_irqs(greth);
@@ -371,7 +373,9 @@ static int greth_close(struct net_device *dev)
 
         napi_disable(&greth->napi);
 
+        greth_disable_irqs(greth);
         greth_disable_tx(greth);
+        greth_disable_rx(greth);
 
         netif_stop_queue(dev);
 
@@ -388,12 +392,20 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
         struct greth_private *greth = netdev_priv(dev);
         struct greth_bd *bdp;
         int err = NETDEV_TX_OK;
-        u32 status, dma_addr;
+        u32 status, dma_addr, ctrl;
+        unsigned long flags;
 
-        bdp = greth->tx_bd_base + greth->tx_next;
+        /* Clean TX Ring */
+        greth_clean_tx(greth->netdev);
 
         if (unlikely(greth->tx_free <= 0)) {
+                spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
+                ctrl = GRETH_REGLOAD(greth->regs->control);
+                /* Enable TX IRQ only if not already in poll() routine */
+                if (ctrl & GRETH_RXI)
+                        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
                 netif_stop_queue(dev);
+                spin_unlock_irqrestore(&greth->devlock, flags);
                 return NETDEV_TX_BUSY;
         }
 
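The busy path above is the subtle part of this hunk: TX-complete interrupts are now normally left masked and the ring is reaped opportunistically at the top of xmit, so when the ring fills up the driver must re-arm the TX IRQ itself before returning busy. It does so only while GRETH_RXI is set, i.e. while poll() is not running with interrupts masked; an active poll() will reap descriptors and wake the queue on its own. A condensed sketch of the pattern, factored into a helper for readability (greth_xmit_stop_queue() is a hypothetical name; the body mirrors the lines added above):

        /* Hypothetical helper; mirrors the busy path added in this hunk. */
        static int greth_xmit_stop_queue(struct greth_private *greth,
                                         struct net_device *dev)
        {
                unsigned long flags;
                u32 ctrl;

                spin_lock_irqsave(&greth->devlock, flags);
                ctrl = GRETH_REGLOAD(greth->regs->control);
                /* GRETH_RXI set => poll() not active, re-arm the TX IRQ */
                if (ctrl & GRETH_RXI)
                        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&greth->devlock, flags);
                return NETDEV_TX_BUSY; /* networking core requeues the skb */
        }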
@@ -406,13 +418,14 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 goto out;
         }
 
+        bdp = greth->tx_bd_base + greth->tx_next;
         dma_addr = greth_read_bd(&bdp->addr);
 
         memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
 
         dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
 
-        status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);
+        status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
 
         /* Wrap around descriptor ring */
         if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -422,22 +435,11 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
         greth->tx_next = NEXT_TX(greth->tx_next);
         greth->tx_free--;
 
-        /* No more descriptors */
-        if (unlikely(greth->tx_free == 0)) {
-
-                /* Free transmitted descriptors */
-                greth_clean_tx(dev);
-
-                /* If nothing was cleaned, stop queue & wait for irq */
-                if (unlikely(greth->tx_free == 0)) {
-                        status |= GRETH_BD_IE;
-                        netif_stop_queue(dev);
-                }
-        }
-
         /* Write descriptor control word and enable transmission */
         greth_write_bd(&bdp->stat, status);
+        spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
         greth_enable_tx(greth);
+        spin_unlock_irqrestore(&greth->devlock, flags);
 
 out:
         dev_kfree_skb(skb);
@@ -450,13 +452,23 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 {
         struct greth_private *greth = netdev_priv(dev);
         struct greth_bd *bdp;
-        u32 status = 0, dma_addr;
+        u32 status = 0, dma_addr, ctrl;
         int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+        unsigned long flags;
 
         nr_frags = skb_shinfo(skb)->nr_frags;
 
+        /* Clean TX Ring */
+        greth_clean_tx_gbit(dev);
+
         if (greth->tx_free < nr_frags + 1) {
+                spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
+                ctrl = GRETH_REGLOAD(greth->regs->control);
+                /* Enable TX IRQ only if not already in poll() routine */
+                if (ctrl & GRETH_RXI)
+                        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
                 netif_stop_queue(dev);
+                spin_unlock_irqrestore(&greth->devlock, flags);
                 err = NETDEV_TX_BUSY;
                 goto out;
         }
@@ -499,7 +511,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
                 greth->tx_skbuff[curr_tx] = NULL;
                 bdp = greth->tx_bd_base + curr_tx;
 
-                status = GRETH_TXBD_CSALL;
+                status = GRETH_TXBD_CSALL | GRETH_BD_EN;
                 status |= frag->size & GRETH_BD_LEN;
 
                 /* Wrap around descriptor ring */
@@ -509,14 +521,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
                 /* More fragments left */
                 if (i < nr_frags - 1)
                         status |= GRETH_TXBD_MORE;
-
-                /* ... last fragment, check if out of descriptors */
-                else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
-
-                        /* Enable interrupts and stop queue */
-                        status |= GRETH_BD_IE;
-                        netif_stop_queue(dev);
-                }
+                else
+                        status |= GRETH_BD_IE; /* enable IRQ on last fragment */
 
                 greth_write_bd(&bdp->stat, status);
 
@@ -536,26 +542,29 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 
         wmb();
 
-        /* Enable the descriptors that we configured ... */
-        for (i = 0; i < nr_frags + 1; i++) {
-                bdp = greth->tx_bd_base + greth->tx_next;
-                greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
-                greth->tx_next = NEXT_TX(greth->tx_next);
-                greth->tx_free--;
-        }
+        /* Enable the descriptor chain by enabling the first descriptor */
+        bdp = greth->tx_bd_base + greth->tx_next;
+        greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+        greth->tx_next = curr_tx;
+        greth->tx_free -= nr_frags + 1;
 
+        wmb();
+
+        spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
         greth_enable_tx(greth);
+        spin_unlock_irqrestore(&greth->devlock, flags);
 
         return NETDEV_TX_OK;
 
 frag_map_error:
-        /* Unmap SKB mappings that succeeded */
+        /* Unmap SKB mappings that succeeded and disable descriptor */
         for (i = 0; greth->tx_next + i != curr_tx; i++) {
                 bdp = greth->tx_bd_base + greth->tx_next + i;
                 dma_unmap_single(greth->dev,
                                  greth_read_bd(&bdp->addr),
                                  greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
                                  DMA_TO_DEVICE);
+                greth_write_bd(&bdp->stat, 0);
         }
 map_error:
         if (net_ratelimit())
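Because every fragment descriptor is now written with GRETH_BD_EN already set except the first one, the whole chain is published to the hardware in a single step: one wmb() to flush the descriptor words, one write that flips the enable bit of the first descriptor, and a second wmb() before the transmitter is kicked. In outline, using the names from this patch:

        wmb();                          /* descriptor words visible first */
        bdp = greth->tx_bd_base + greth->tx_next;
        greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
        greth->tx_next = curr_tx;       /* bookkeeping updated in one go */
        greth->tx_free -= nr_frags + 1;
        wmb();                          /* enable bit visible before TX kick */
        spin_lock_irqsave(&greth->devlock, flags);
        greth_enable_tx(greth);         /* serialized against poll()/irq */
        spin_unlock_irqrestore(&greth->devlock, flags);

This replaces the old loop that set GRETH_BD_EN on all nr_frags + 1 descriptors one at a time, during which an already-running transmitter could catch up and find a not-yet-enabled descriptor in the middle of a fragment chain.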
@@ -565,12 +574,11 @@ out:
         return err;
 }
 
-
 static irqreturn_t greth_interrupt(int irq, void *dev_id)
 {
         struct net_device *dev = dev_id;
         struct greth_private *greth;
-        u32 status;
+        u32 status, ctrl;
         irqreturn_t retval = IRQ_NONE;
 
         greth = netdev_priv(dev);
@@ -580,13 +588,15 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id)
         /* Get the interrupt events that caused us to be here. */
         status = GRETH_REGLOAD(greth->regs->status);
 
-        /* Handle rx and tx interrupts through poll */
-        if (status & (GRETH_INT_RX | GRETH_INT_TX)) {
-
-                /* Clear interrupt status */
-                GRETH_REGORIN(greth->regs->status,
-                              status & (GRETH_INT_RX | GRETH_INT_TX));
+        /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
+         * set regardless of whether IRQ is enabled or not. Especially
+         * important when shared IRQ.
+         */
+        ctrl = GRETH_REGLOAD(greth->regs->control);
 
+        /* Handle rx and tx interrupts through poll */
+        if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
+            ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
                 retval = IRQ_HANDLED;
 
                 /* Disable interrupts and schedule poll() */
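The rewritten condition matters on shared interrupt lines: the status register latches GRETH_INT_RX/GRETH_INT_TX (and, evidently, their error counterparts GRETH_INT_RE/GRETH_INT_TE) even while the corresponding enables GRETH_RXI/GRETH_TXI in the control register are off, as they are whenever poll() is running. Claiming the IRQ on status alone would therefore mis-attribute interrupts raised by another device on the same line. Conceptually the handler now computes:

        /* "ours" = event pending AND that event's IRQ currently enabled */
        int rx_ours = (status & (GRETH_INT_RE | GRETH_INT_RX)) &&
                      (ctrl & GRETH_RXI);
        int tx_ours = (status & (GRETH_INT_TE | GRETH_INT_TX)) &&
                      (ctrl & GRETH_TXI);

        if (rx_ours || tx_ours)
                retval = IRQ_HANDLED;   /* mask IRQs, schedule poll() */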
@@ -610,6 +620,8 @@ static void greth_clean_tx(struct net_device *dev)
 
         while (1) {
                 bdp = greth->tx_bd_base + greth->tx_last;
+                GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+                mb();
                 stat = greth_read_bd(&bdp->stat);
 
                 if (unlikely(stat & GRETH_BD_EN))
@@ -670,7 +682,10 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 
                 /* We only clean fully completed SKBs */
                 bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
-                stat = bdp_last_frag->stat;
+
+                GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+                mb();
+                stat = greth_read_bd(&bdp_last_frag->stat);
 
                 if (stat & GRETH_BD_EN)
                         break;
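Both TX cleanup paths, like the RX loops further down, now acknowledge the status bits before sampling the descriptor, with an mb() between the two. The ordering is what prevents lost completions while IRQs are masked: done the other way around, a completion landing between the descriptor read and the ack would be cleared without ever being seen. Acked first, a late completion simply sets the status bit again and is caught by the re-check in greth_poll() below. In outline:

        GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); /* ack first */
        mb();                                   /* ack ordered before the ring read */
        stat = greth_read_bd(&bdp->stat);
        if (stat & GRETH_BD_EN)                 /* still owned by hardware */
                break;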
@@ -702,21 +717,9 @@ static void greth_clean_tx_gbit(struct net_device *dev)
                 greth->tx_free += nr_frags+1;
                 dev_kfree_skb(skb);
         }
-        if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
-                netif_wake_queue(dev);
-        }
-}
 
-static int greth_pending_packets(struct greth_private *greth)
-{
-        struct greth_bd *bdp;
-        u32 status;
-        bdp = greth->rx_bd_base + greth->rx_cur;
-        status = greth_read_bd(&bdp->stat);
-        if (status & GRETH_BD_EN)
-                return 0;
-        else
-                return 1;
+        if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
+                netif_wake_queue(dev);
 }
 
 static int greth_rx(struct net_device *dev, int limit)
@@ -727,20 +730,24 @@ static int greth_rx(struct net_device *dev, int limit)
         int pkt_len;
         int bad, count;
         u32 status, dma_addr;
+        unsigned long flags;
 
         greth = netdev_priv(dev);
 
         for (count = 0; count < limit; ++count) {
 
                 bdp = greth->rx_bd_base + greth->rx_cur;
+                GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+                mb();
                 status = greth_read_bd(&bdp->stat);
-                dma_addr = greth_read_bd(&bdp->addr);
-                bad = 0;
 
                 if (unlikely(status & GRETH_BD_EN)) {
                         break;
                 }
 
+                dma_addr = greth_read_bd(&bdp->addr);
+                bad = 0;
+
                 /* Check status for errors. */
                 if (unlikely(status & GRETH_RXBD_STATUS)) {
                         if (status & GRETH_RXBD_ERR_FT) {
@@ -802,7 +809,9 @@ static int greth_rx(struct net_device *dev, int limit)
 
                 dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
 
+                spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
                 greth_enable_rx(greth);
+                spin_unlock_irqrestore(&greth->devlock, flags);
 
                 greth->rx_cur = NEXT_RX(greth->rx_cur);
         }
@@ -836,6 +845,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
         int pkt_len;
         int bad, count = 0;
         u32 status, dma_addr;
+        unsigned long flags;
 
         greth = netdev_priv(dev);
 
@@ -843,6 +853,8 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
 
                 bdp = greth->rx_bd_base + greth->rx_cur;
                 skb = greth->rx_skbuff[greth->rx_cur];
+                GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+                mb();
                 status = greth_read_bd(&bdp->stat);
                 bad = 0;
 
@@ -865,10 +877,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
                         }
                 }
 
-                /* Allocate new skb to replace current */
-                newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);
-
-                if (!bad && newskb) {
+                /* Allocate new skb to replace current, not needed if the
+                 * current skb can be reused */
+                if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
                         skb_reserve(newskb, NET_IP_ALIGN);
 
                         dma_addr = dma_map_single(greth->dev,
@@ -905,11 +916,22 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
                                 if (net_ratelimit())
                                         dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
                                 dev_kfree_skb(newskb);
+                                /* reusing current skb, so it is a drop */
                                 dev->stats.rx_dropped++;
                         }
+                } else if (bad) {
+                        /* Bad Frame transfer, the skb is reused */
+                        dev->stats.rx_dropped++;
                 } else {
+                        /* Failed Allocating a new skb. This is rather stupid
+                         * but the current "filled" skb is reused, as if
+                         * transfer failure. One could argue that RX descriptor
+                         * table handling should be divided into cleaning and
+                         * filling as the TX part of the driver
+                         */
                         if (net_ratelimit())
                                 dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
+                        /* reusing current skb, so it is a drop */
                        dev->stats.rx_dropped++;
                 }
 
@@ -920,7 +942,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
 
                 wmb();
                 greth_write_bd(&bdp->stat, status);
+                spin_lock_irqsave(&greth->devlock, flags);
                 greth_enable_rx(greth);
+                spin_unlock_irqrestore(&greth->devlock, flags);
                 greth->rx_cur = NEXT_RX(greth->rx_cur);
         }
 
@@ -932,15 +956,18 @@ static int greth_poll(struct napi_struct *napi, int budget)
 {
         struct greth_private *greth;
         int work_done = 0;
+        unsigned long flags;
+        u32 mask, ctrl;
         greth = container_of(napi, struct greth_private, napi);
 
-        if (greth->gbit_mac) {
-                greth_clean_tx_gbit(greth->netdev);
-        } else {
-                greth_clean_tx(greth->netdev);
+restart_txrx_poll:
+        if (netif_queue_stopped(greth->netdev)) {
+                if (greth->gbit_mac)
+                        greth_clean_tx_gbit(greth->netdev);
+                else
+                        greth_clean_tx(greth->netdev);
         }
 
-restart_poll:
         if (greth->gbit_mac) {
                 work_done += greth_rx_gbit(greth->netdev, budget - work_done);
         } else {
@@ -949,15 +976,29 @@ restart_poll:
 
         if (work_done < budget) {
 
-                napi_complete(napi);
+                spin_lock_irqsave(&greth->devlock, flags);
+
+                ctrl = GRETH_REGLOAD(greth->regs->control);
+                if (netif_queue_stopped(greth->netdev)) {
+                        GRETH_REGSAVE(greth->regs->control,
+                                      ctrl | GRETH_TXI | GRETH_RXI);
+                        mask = GRETH_INT_RX | GRETH_INT_RE |
+                               GRETH_INT_TX | GRETH_INT_TE;
+                } else {
+                        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
+                        mask = GRETH_INT_RX | GRETH_INT_RE;
+                }
 
-                if (greth_pending_packets(greth)) {
-                        napi_reschedule(napi);
-                        goto restart_poll;
+                if (GRETH_REGLOAD(greth->regs->status) & mask) {
+                        GRETH_REGSAVE(greth->regs->control, ctrl);
+                        spin_unlock_irqrestore(&greth->devlock, flags);
+                        goto restart_txrx_poll;
+                } else {
+                        __napi_complete(napi);
+                        spin_unlock_irqrestore(&greth->devlock, flags);
                 }
         }
 
-        greth_enable_irqs(greth);
         return work_done;
 }
 
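greth_poll() now closes the classic NAPI completion race: interrupts are re-armed under devlock, then the status register is re-checked against the relevant mask before napi is completed. If an event slipped in while IRQs were masked, the enables are rolled back and polling restarts at restart_txrx_poll; only a clean re-check reaches __napi_complete(), after which any new event raises a real interrupt. TX events are included in the mask only while the queue is stopped, matching the xmit paths that re-arm GRETH_TXI. Schematically:

        spin_lock_irqsave(&greth->devlock, flags);
        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);  /* re-arm */
        if (GRETH_REGLOAD(greth->regs->status) & mask) {
                GRETH_REGSAVE(greth->regs->control, ctrl);      /* roll back */
                spin_unlock_irqrestore(&greth->devlock, flags);
                goto restart_txrx_poll;                         /* missed work */
        }
        __napi_complete(napi);          /* safe: nothing pending, IRQs armed */
        spin_unlock_irqrestore(&greth->devlock, flags);

With the re-check done here, the unconditional greth_enable_irqs() at the end of poll() becomes redundant and is dropped.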
@@ -1152,11 +1193,11 @@ static const struct ethtool_ops greth_ethtool_ops = {
 };
 
 static struct net_device_ops greth_netdev_ops = {
-        .ndo_open = greth_open,
-        .ndo_stop = greth_close,
-        .ndo_start_xmit = greth_start_xmit,
-        .ndo_set_mac_address = greth_set_mac_add,
-        .ndo_validate_addr = eth_validate_addr,
+        .ndo_open               = greth_open,
+        .ndo_stop               = greth_close,
+        .ndo_start_xmit         = greth_start_xmit,
+        .ndo_set_mac_address    = greth_set_mac_add,
+        .ndo_validate_addr      = eth_validate_addr,
 };
 
 static inline int wait_for_mdio(struct greth_private *greth)
@@ -1217,29 +1258,26 @@ static void greth_link_change(struct net_device *dev)
         struct greth_private *greth = netdev_priv(dev);
         struct phy_device *phydev = greth->phy;
         unsigned long flags;
-
         int status_change = 0;
+        u32 ctrl;
 
         spin_lock_irqsave(&greth->devlock, flags);
 
         if (phydev->link) {
 
                 if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
-
-                        GRETH_REGANDIN(greth->regs->control,
-                                       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));
+                        ctrl = GRETH_REGLOAD(greth->regs->control) &
+                               ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
 
                         if (phydev->duplex)
-                                GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD);
-
-                        if (phydev->speed == SPEED_100) {
-
-                                GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
-                        }
+                                ctrl |= GRETH_CTRL_FD;
 
+                        if (phydev->speed == SPEED_100)
+                                ctrl |= GRETH_CTRL_SP;
                         else if (phydev->speed == SPEED_1000)
-                                GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB);
+                                ctrl |= GRETH_CTRL_GB;
 
+                        GRETH_REGSAVE(greth->regs->control, ctrl);
                         greth->speed = phydev->speed;
                         greth->duplex = phydev->duplex;
                         status_change = 1;
@@ -1600,6 +1638,9 @@ static struct of_device_id greth_of_match[] = {
         {
          .name = "GAISLER_ETHMAC",
          },
+        {
+         .name = "01_01d",
+         },
         {},
 };
 
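The added "01_01d" entry is, presumably, the raw AMBA plug-and-play identifier of the same core (vendor 0x01, Gaisler; device 0x01d, GRETH), letting the driver bind on systems whose device tree carries only the numeric vendor/device pair instead of the GAISLER_ETHMAC name string.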