author     Sandeep Gopalpet <Sandeep.Kumar@freescale.com>   2009-11-02 02:03:34 -0500
committer  David S. Miller <davem@davemloft.net>            2009-11-03 02:40:59 -0500
commit     46ceb60ca80fa07703bc6eb8f4651f900dff5a82 (patch)
tree       9bd694c3b1559cbd0fa72bff1f521ce4bcc95aa6
parent     2e0246c72fa2e2b61865a2d5aaff1cc9155b9447 (diff)
gianfar: Add Multiple group Support
This patch introduces multiple group support for etsec2.0 devices. Multiple
group support is provided by mapping the set of enabled queues to different
groups and then programming the per-group registers imask, ievent, rstat and
tstat. The queues corresponding to a group are indicated by programming the
isrg (interrupt steering) registers.

Signed-off-by: Sandeep Gopalpet <Sandeep.Kumar@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
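For illustration, a minimal standalone sketch of the ISRG packing described above, assuming the two-group default mapping from the patch. The constants mirror the patch; pack_isrg() and main() are hypothetical helpers for demonstration only, not part of the driver.

#include <stdio.h>
#include <stdint.h>

#define MAXGROUPS       2
#define DEFAULT_MAPPING 0xAA    /* two groups: enabled queues alternate between them */
#define ISRG_SHIFT_TX   0x10    /* tx queue bit map lands in bits 16..23 of ISRGn */
#define ISRG_SHIFT_RX   0x18    /* rx queue bit map lands in bits 24..31 of ISRGn */

/* Pack one group's rx/tx queue bit maps into the value written to its ISRG register. */
static uint32_t pack_isrg(uint8_t rx_bit_map, uint8_t tx_bit_map)
{
	uint32_t isrg = 0;

	isrg |= (uint32_t)rx_bit_map << ISRG_SHIFT_RX;
	isrg |= (uint32_t)tx_bit_map << ISRG_SHIFT_TX;
	return isrg;
}

int main(void)
{
	/* When no fsl,rx-bit-map / fsl,tx-bit-map property is given, group n
	 * falls back to DEFAULT_MAPPING >> n, as in gfar_parse_group() below. */
	for (int grp = 0; grp < MAXGROUPS; grp++) {
		uint8_t map = (uint8_t)(DEFAULT_MAPPING >> grp);

		printf("ISRG%d = 0x%08x (rx/tx bit map 0x%02x)\n",
		       grp, (unsigned int)pack_isrg(map, map),
		       (unsigned int)map);
	}
	return 0;
}

With two groups this prints ISRG0 built from map 0xAA and ISRG1 from 0x55, i.e. odd and even queues steered to different groups, which is what the isrg programming loop in gfar_probe() below writes to the hardware.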
-rw-r--r--  drivers/net/gianfar.c          532
-rw-r--r--  drivers/net/gianfar.h           29
-rw-r--r--  drivers/net/gianfar_ethtool.c   58
-rw-r--r--  drivers/net/gianfar_sysfs.c     12
4 files changed, 422 insertions, 209 deletions
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index aa258e899261..dc9fba09b17c 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -326,7 +326,7 @@ cleanup:
326 326
327static void gfar_init_tx_rx_base(struct gfar_private *priv) 327static void gfar_init_tx_rx_base(struct gfar_private *priv)
328{ 328{
329 struct gfar __iomem *regs = priv->gfargrp.regs; 329 struct gfar __iomem *regs = priv->gfargrp[0].regs;
330 u32 *baddr; 330 u32 *baddr;
331 int i; 331 int i;
332 332
@@ -346,7 +346,7 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
346static void gfar_init_mac(struct net_device *ndev) 346static void gfar_init_mac(struct net_device *ndev)
347{ 347{
348 struct gfar_private *priv = netdev_priv(ndev); 348 struct gfar_private *priv = netdev_priv(ndev);
349 struct gfar __iomem *regs = priv->gfargrp.regs; 349 struct gfar __iomem *regs = priv->gfargrp[0].regs;
350 u32 rctrl = 0; 350 u32 rctrl = 0;
351 u32 tctrl = 0; 351 u32 tctrl = 0;
352 u32 attrs = 0; 352 u32 attrs = 0;
@@ -355,13 +355,7 @@ static void gfar_init_mac(struct net_device *ndev)
355 gfar_init_tx_rx_base(priv); 355 gfar_init_tx_rx_base(priv);
356 356
357 /* Configure the coalescing support */ 357 /* Configure the coalescing support */
358 gfar_write(&regs->txic, 0); 358 gfar_configure_coalescing(priv, 0xFF, 0xFF);
359 if (priv->tx_queue[0]->txcoalescing)
360 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
361
362 gfar_write(&regs->rxic, 0);
363 if (priv->rx_queue[0]->rxcoalescing)
364 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
365 359
366 if (priv->rx_filer_enable) 360 if (priv->rx_filer_enable)
367 rctrl |= RCTRL_FILREN; 361 rctrl |= RCTRL_FILREN;
@@ -495,16 +489,91 @@ static void free_rx_pointers(struct gfar_private *priv)
495 kfree(priv->rx_queue[i]); 489 kfree(priv->rx_queue[i]);
496} 490}
497 491
492static void unmap_group_regs(struct gfar_private *priv)
493{
494 int i = 0;
495
496 for (i = 0; i < MAXGROUPS; i++)
497 if (priv->gfargrp[i].regs)
498 iounmap(priv->gfargrp[i].regs);
499}
500
501static void disable_napi(struct gfar_private *priv)
502{
503 int i = 0;
504
505 for (i = 0; i < priv->num_grps; i++)
506 napi_disable(&priv->gfargrp[i].napi);
507}
508
509static void enable_napi(struct gfar_private *priv)
510{
511 int i = 0;
512
513 for (i = 0; i < priv->num_grps; i++)
514 napi_enable(&priv->gfargrp[i].napi);
515}
516
517static int gfar_parse_group(struct device_node *np,
518 struct gfar_private *priv, const char *model)
519{
520 u32 *queue_mask;
521 u64 addr, size;
522
523 addr = of_translate_address(np,
524 of_get_address(np, 0, &size, NULL));
525 priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
526
527 if (!priv->gfargrp[priv->num_grps].regs)
528 return -ENOMEM;
529
530 priv->gfargrp[priv->num_grps].interruptTransmit =
531 irq_of_parse_and_map(np, 0);
532
533 /* If we aren't the FEC we have multiple interrupts */
534 if (model && strcasecmp(model, "FEC")) {
535 priv->gfargrp[priv->num_grps].interruptReceive =
536 irq_of_parse_and_map(np, 1);
537 priv->gfargrp[priv->num_grps].interruptError =
538 irq_of_parse_and_map(np,2);
539 if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
540 priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
541 priv->gfargrp[priv->num_grps].interruptError < 0) {
542 return -EINVAL;
543 }
544 }
545
546 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
547 priv->gfargrp[priv->num_grps].priv = priv;
548 spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
549 if(priv->mode == MQ_MG_MODE) {
550 queue_mask = (u32 *)of_get_property(np,
551 "fsl,rx-bit-map", NULL);
552 priv->gfargrp[priv->num_grps].rx_bit_map =
553 queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
554 queue_mask = (u32 *)of_get_property(np,
555 "fsl,tx-bit-map", NULL);
556 priv->gfargrp[priv->num_grps].tx_bit_map =
557 queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
558 } else {
559 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
560 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
561 }
562 priv->num_grps++;
563
564 return 0;
565}
566
498static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) 567static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
499{ 568{
500 const char *model; 569 const char *model;
501 const char *ctype; 570 const char *ctype;
502 const void *mac_addr; 571 const void *mac_addr;
503 u64 addr, size;
504 int err = 0, i; 572 int err = 0, i;
505 struct net_device *dev = NULL; 573 struct net_device *dev = NULL;
506 struct gfar_private *priv = NULL; 574 struct gfar_private *priv = NULL;
507 struct device_node *np = ofdev->node; 575 struct device_node *np = ofdev->node;
576 struct device_node *child = NULL;
508 const u32 *stash; 577 const u32 *stash;
509 const u32 *stash_len; 578 const u32 *stash_len;
510 const u32 *stash_idx; 579 const u32 *stash_idx;
@@ -548,36 +617,26 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
548 dev->real_num_tx_queues = num_tx_qs; 617 dev->real_num_tx_queues = num_tx_qs;
549 priv->num_tx_queues = num_tx_qs; 618 priv->num_tx_queues = num_tx_qs;
550 priv->num_rx_queues = num_rx_qs; 619 priv->num_rx_queues = num_rx_qs;
551 620 priv->num_grps = 0x0;
552 /* get a pointer to the register memory */
553 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
554 priv->gfargrp.regs = ioremap(addr, size);
555
556 if (priv->gfargrp.regs == NULL) {
557 err = -ENOMEM;
558 goto err_out;
559 }
560
561 priv->gfargrp.priv = priv; /* back pointer from group to priv */
562 priv->gfargrp.rx_bit_map = DEFAULT_MAPPING;
563 priv->gfargrp.tx_bit_map = DEFAULT_MAPPING;
564
565 priv->gfargrp.interruptTransmit = irq_of_parse_and_map(np, 0);
566 621
567 model = of_get_property(np, "model", NULL); 622 model = of_get_property(np, "model", NULL);
568 623
569 /* If we aren't the FEC we have multiple interrupts */ 624 for (i = 0; i < MAXGROUPS; i++)
570 if (model && strcasecmp(model, "FEC")) { 625 priv->gfargrp[i].regs = NULL;
571 priv->gfargrp.interruptReceive = irq_of_parse_and_map(np, 1);
572 626
573 priv->gfargrp.interruptError = irq_of_parse_and_map(np, 2); 627 /* Parse and initialize group specific information */
574 628 if (of_device_is_compatible(np, "fsl,etsec2")) {
575 if (priv->gfargrp.interruptTransmit < 0 || 629 priv->mode = MQ_MG_MODE;
576 priv->gfargrp.interruptReceive < 0 || 630 for_each_child_of_node(np, child) {
577 priv->gfargrp.interruptError < 0) { 631 err = gfar_parse_group(child, priv, model);
578 err = -EINVAL; 632 if (err)
579 goto err_out; 633 goto err_grp_init;
580 } 634 }
635 } else {
636 priv->mode = SQ_SG_MODE;
637 err = gfar_parse_group(np, priv, model);
638 if(err)
639 goto err_grp_init;
581 } 640 }
582 641
583 for (i = 0; i < priv->num_tx_queues; i++) 642 for (i = 0; i < priv->num_tx_queues; i++)
@@ -676,8 +735,8 @@ rx_alloc_failed:
676 free_rx_pointers(priv); 735 free_rx_pointers(priv);
677tx_alloc_failed: 736tx_alloc_failed:
678 free_tx_pointers(priv); 737 free_tx_pointers(priv);
679err_out: 738err_grp_init:
680 iounmap(priv->gfargrp.regs); 739 unmap_group_regs(priv);
681 free_netdev(dev); 740 free_netdev(dev);
682 return err; 741 return err;
683} 742}
@@ -716,9 +775,11 @@ static int gfar_probe(struct of_device *ofdev,
716 struct net_device *dev = NULL; 775 struct net_device *dev = NULL;
717 struct gfar_private *priv = NULL; 776 struct gfar_private *priv = NULL;
718 struct gfar __iomem *regs = NULL; 777 struct gfar __iomem *regs = NULL;
719 int err = 0, i; 778 int err = 0, i, grp_idx = 0;
720 int len_devname; 779 int len_devname;
721 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; 780 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
781 u32 isrg = 0;
782 u32 *baddr;
722 783
723 err = gfar_of_init(ofdev, &dev); 784 err = gfar_of_init(ofdev, &dev);
724 785
@@ -731,12 +792,11 @@ static int gfar_probe(struct of_device *ofdev,
731 priv->node = ofdev->node; 792 priv->node = ofdev->node;
732 SET_NETDEV_DEV(dev, &ofdev->dev); 793 SET_NETDEV_DEV(dev, &ofdev->dev);
733 794
734 spin_lock_init(&priv->gfargrp.grplock);
735 spin_lock_init(&priv->bflock); 795 spin_lock_init(&priv->bflock);
736 INIT_WORK(&priv->reset_task, gfar_reset_task); 796 INIT_WORK(&priv->reset_task, gfar_reset_task);
737 797
738 dev_set_drvdata(&ofdev->dev, priv); 798 dev_set_drvdata(&ofdev->dev, priv);
739 regs = priv->gfargrp.regs; 799 regs = priv->gfargrp[0].regs;
740 800
741 /* Stop the DMA engine now, in case it was running before */ 801 /* Stop the DMA engine now, in case it was running before */
742 /* (The firmware could have used it, and left it running). */ 802 /* (The firmware could have used it, and left it running). */
@@ -769,7 +829,8 @@ static int gfar_probe(struct of_device *ofdev,
769 dev->ethtool_ops = &gfar_ethtool_ops; 829 dev->ethtool_ops = &gfar_ethtool_ops;
770 830
771 /* Register for napi ...We are registering NAPI for each grp */ 831 /* Register for napi ...We are registering NAPI for each grp */
772 netif_napi_add(dev, &priv->gfargrp.napi, gfar_poll, GFAR_DEV_WEIGHT); 832 for (i = 0; i < priv->num_grps; i++)
833 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
773 834
774 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 835 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
775 priv->rx_csum_enable = 1; 836 priv->rx_csum_enable = 1;
@@ -825,25 +886,51 @@ static int gfar_probe(struct of_device *ofdev,
825 if (dev->features & NETIF_F_IP_CSUM) 886 if (dev->features & NETIF_F_IP_CSUM)
826 dev->hard_header_len += GMAC_FCB_LEN; 887 dev->hard_header_len += GMAC_FCB_LEN;
827 888
889 /* Program the isrg regs only if number of grps > 1 */
890 if (priv->num_grps > 1) {
891 baddr = &regs->isrg0;
892 for (i = 0; i < priv->num_grps; i++) {
893 isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
894 isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
895 gfar_write(baddr, isrg);
896 baddr++;
897 isrg = 0x0;
898 }
899 }
900
828 /* Need to reverse the bit maps as bit_map's MSB is q0 901 /* Need to reverse the bit maps as bit_map's MSB is q0
829 * but, for_each_bit parses from right to left, which 902 * but, for_each_bit parses from right to left, which
830 * basically reverses the queue numbers */ 903 * basically reverses the queue numbers */
831 priv->gfargrp.tx_bit_map = reverse_bitmap(priv->gfargrp.tx_bit_map, MAX_TX_QS); 904 for (i = 0; i< priv->num_grps; i++) {
832 priv->gfargrp.rx_bit_map = reverse_bitmap(priv->gfargrp.rx_bit_map, MAX_RX_QS); 905 priv->gfargrp[i].tx_bit_map = reverse_bitmap(
833 906 priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
834 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values */ 907 priv->gfargrp[i].rx_bit_map = reverse_bitmap(
835 for_each_bit(i, &priv->gfargrp.rx_bit_map, priv->num_rx_queues) { 908 priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
836 priv->gfargrp.num_rx_queues++; 909 }
837 rstat = rstat | (RSTAT_CLEAR_RHALT >> i); 910
838 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); 911 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
839 } 912 * also assign queues to groups */
840 for_each_bit (i, &priv->gfargrp.tx_bit_map, priv->num_tx_queues) { 913 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
841 priv->gfargrp.num_tx_queues++; 914 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
842 tstat = tstat | (TSTAT_CLEAR_THALT >> i); 915 for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
843 tqueue = tqueue | (TQUEUE_EN0 >> i); 916 priv->num_rx_queues) {
917 priv->gfargrp[grp_idx].num_rx_queues++;
918 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
919 rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
920 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
921 }
922 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
923 for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
924 priv->num_tx_queues) {
925 priv->gfargrp[grp_idx].num_tx_queues++;
926 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
927 tstat = tstat | (TSTAT_CLEAR_THALT >> i);
928 tqueue = tqueue | (TQUEUE_EN0 >> i);
929 }
930 priv->gfargrp[grp_idx].rstat = rstat;
931 priv->gfargrp[grp_idx].tstat = tstat;
932 rstat = tstat =0;
844 } 933 }
845 priv->gfargrp.rstat = rstat;
846 priv->gfargrp.tstat = tstat;
847 934
848 gfar_write(&regs->rqueue, rqueue); 935 gfar_write(&regs->rqueue, rqueue);
849 gfar_write(&regs->tqueue, tqueue); 936 gfar_write(&regs->tqueue, tqueue);
@@ -883,20 +970,40 @@ static int gfar_probe(struct of_device *ofdev,
883 970
884 /* fill out IRQ number and name fields */ 971 /* fill out IRQ number and name fields */
885 len_devname = strlen(dev->name); 972 len_devname = strlen(dev->name);
886 strncpy(&priv->gfargrp.int_name_tx[0], dev->name, len_devname); 973 for (i = 0; i < priv->num_grps; i++) {
887 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 974 strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
888 strncpy(&priv->gfargrp.int_name_tx[len_devname], 975 len_devname);
889 "_tx", sizeof("_tx") + 1); 976 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
890 977 strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
891 strncpy(&priv->gfargrp.int_name_rx[0], dev->name, len_devname); 978 "_g", sizeof("_g"));
892 strncpy(&priv->gfargrp.int_name_rx[len_devname], 979 priv->gfargrp[i].int_name_tx[
893 "_rx", sizeof("_rx") + 1); 980 strlen(priv->gfargrp[i].int_name_tx)] = i+48;
894 981 strncpy(&priv->gfargrp[i].int_name_tx[strlen(
895 strncpy(&priv->gfargrp.int_name_er[0], dev->name, len_devname); 982 priv->gfargrp[i].int_name_tx)],
896 strncpy(&priv->gfargrp.int_name_er[len_devname], 983 "_tx", sizeof("_tx") + 1);
897 "_er", sizeof("_er") + 1); 984
898 } else 985 strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
899 priv->gfargrp.int_name_tx[len_devname] = '\0'; 986 len_devname);
987 strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
988 "_g", sizeof("_g"));
989 priv->gfargrp[i].int_name_rx[
990 strlen(priv->gfargrp[i].int_name_rx)] = i+48;
991 strncpy(&priv->gfargrp[i].int_name_rx[strlen(
992 priv->gfargrp[i].int_name_rx)],
993 "_rx", sizeof("_rx") + 1);
994
995 strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
996 len_devname);
997 strncpy(&priv->gfargrp[i].int_name_er[len_devname],
998 "_g", sizeof("_g"));
999 priv->gfargrp[i].int_name_er[strlen(
1000 priv->gfargrp[i].int_name_er)] = i+48;
1001 strncpy(&priv->gfargrp[i].int_name_er[strlen(\
1002 priv->gfargrp[i].int_name_er)],
1003 "_er", sizeof("_er") + 1);
1004 } else
1005 priv->gfargrp[i].int_name_tx[len_devname] = '\0';
1006 }
900 1007
901 /* Create all the sysfs files */ 1008 /* Create all the sysfs files */
902 gfar_init_sysfs(dev); 1009 gfar_init_sysfs(dev);
@@ -917,7 +1024,7 @@ static int gfar_probe(struct of_device *ofdev,
917 return 0; 1024 return 0;
918 1025
919register_fail: 1026register_fail:
920 iounmap(priv->gfargrp.regs); 1027 unmap_group_regs(priv);
921 free_tx_pointers(priv); 1028 free_tx_pointers(priv);
922 free_rx_pointers(priv); 1029 free_rx_pointers(priv);
923 if (priv->phy_node) 1030 if (priv->phy_node)
@@ -940,7 +1047,7 @@ static int gfar_remove(struct of_device *ofdev)
940 dev_set_drvdata(&ofdev->dev, NULL); 1047 dev_set_drvdata(&ofdev->dev, NULL);
941 1048
942 unregister_netdev(priv->ndev); 1049 unregister_netdev(priv->ndev);
943 iounmap(priv->gfargrp.regs); 1050 unmap_group_regs(priv);
944 free_netdev(priv->ndev); 1051 free_netdev(priv->ndev);
945 1052
946 return 0; 1053 return 0;
@@ -952,7 +1059,7 @@ static int gfar_suspend(struct device *dev)
952{ 1059{
953 struct gfar_private *priv = dev_get_drvdata(dev); 1060 struct gfar_private *priv = dev_get_drvdata(dev);
954 struct net_device *ndev = priv->ndev; 1061 struct net_device *ndev = priv->ndev;
955 struct gfar __iomem *regs = NULL; 1062 struct gfar __iomem *regs = priv->gfargrp[0].regs;
956 unsigned long flags; 1063 unsigned long flags;
957 u32 tempval; 1064 u32 tempval;
958 1065
@@ -960,7 +1067,6 @@ static int gfar_suspend(struct device *dev)
960 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1067 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
961 1068
962 netif_device_detach(ndev); 1069 netif_device_detach(ndev);
963 regs = priv->gfargrp.regs;
964 1070
965 if (netif_running(ndev)) { 1071 if (netif_running(ndev)) {
966 1072
@@ -984,7 +1090,7 @@ static int gfar_suspend(struct device *dev)
984 unlock_tx_qs(priv); 1090 unlock_tx_qs(priv);
985 local_irq_restore(flags); 1091 local_irq_restore(flags);
986 1092
987 napi_disable(&priv->gfargrp.napi); 1093 disable_napi(priv);
988 1094
989 if (magic_packet) { 1095 if (magic_packet) {
990 /* Enable interrupt on Magic Packet */ 1096 /* Enable interrupt on Magic Packet */
@@ -1006,7 +1112,7 @@ static int gfar_resume(struct device *dev)
1006{ 1112{
1007 struct gfar_private *priv = dev_get_drvdata(dev); 1113 struct gfar_private *priv = dev_get_drvdata(dev);
1008 struct net_device *ndev = priv->ndev; 1114 struct net_device *ndev = priv->ndev;
1009 struct gfar __iomem *regs = NULL; 1115 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1010 unsigned long flags; 1116 unsigned long flags;
1011 u32 tempval; 1117 u32 tempval;
1012 int magic_packet = priv->wol_en && 1118 int magic_packet = priv->wol_en &&
@@ -1023,8 +1129,6 @@ static int gfar_resume(struct device *dev)
1023 /* Disable Magic Packet mode, in case something 1129 /* Disable Magic Packet mode, in case something
1024 * else woke us up. 1130 * else woke us up.
1025 */ 1131 */
1026 regs = priv->gfargrp.regs;
1027
1028 local_irq_save(flags); 1132 local_irq_save(flags);
1029 lock_tx_qs(priv); 1133 lock_tx_qs(priv);
1030 lock_rx_qs(priv); 1134 lock_rx_qs(priv);
@@ -1041,7 +1145,7 @@ static int gfar_resume(struct device *dev)
1041 1145
1042 netif_device_attach(ndev); 1146 netif_device_attach(ndev);
1043 1147
1044 napi_enable(&priv->gfargrp.napi); 1148 enable_napi(priv);
1045 1149
1046 return 0; 1150 return 0;
1047} 1151}
@@ -1107,10 +1211,9 @@ static int gfar_legacy_resume(struct of_device *ofdev)
1107static phy_interface_t gfar_get_interface(struct net_device *dev) 1211static phy_interface_t gfar_get_interface(struct net_device *dev)
1108{ 1212{
1109 struct gfar_private *priv = netdev_priv(dev); 1213 struct gfar_private *priv = netdev_priv(dev);
1110 struct gfar __iomem *regs = NULL; 1214 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1111 u32 ecntrl; 1215 u32 ecntrl;
1112 1216
1113 regs = priv->gfargrp.regs;
1114 ecntrl = gfar_read(&regs->ecntrl); 1217 ecntrl = gfar_read(&regs->ecntrl);
1115 1218
1116 if (ecntrl & ECNTRL_SGMII_MODE) 1219 if (ecntrl & ECNTRL_SGMII_MODE)
@@ -1234,14 +1337,18 @@ static void init_registers(struct net_device *dev)
1234{ 1337{
1235 struct gfar_private *priv = netdev_priv(dev); 1338 struct gfar_private *priv = netdev_priv(dev);
1236 struct gfar __iomem *regs = NULL; 1339 struct gfar __iomem *regs = NULL;
1340 int i = 0;
1237 1341
1238 regs = priv->gfargrp.regs; 1342 for (i = 0; i < priv->num_grps; i++) {
1239 /* Clear IEVENT */ 1343 regs = priv->gfargrp[i].regs;
1240 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); 1344 /* Clear IEVENT */
1345 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1241 1346
1242 /* Initialize IMASK */ 1347 /* Initialize IMASK */
1243 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1348 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1349 }
1244 1350
1351 regs = priv->gfargrp[0].regs;
1245 /* Init hash registers to zero */ 1352 /* Init hash registers to zero */
1246 gfar_write(&regs->igaddr0, 0); 1353 gfar_write(&regs->igaddr0, 0);
1247 gfar_write(&regs->igaddr1, 0); 1354 gfar_write(&regs->igaddr1, 0);
@@ -1282,15 +1389,20 @@ static void init_registers(struct net_device *dev)
1282static void gfar_halt_nodisable(struct net_device *dev) 1389static void gfar_halt_nodisable(struct net_device *dev)
1283{ 1390{
1284 struct gfar_private *priv = netdev_priv(dev); 1391 struct gfar_private *priv = netdev_priv(dev);
1285 struct gfar __iomem *regs = priv->gfargrp.regs; 1392 struct gfar __iomem *regs = NULL;
1286 u32 tempval; 1393 u32 tempval;
1394 int i = 0;
1287 1395
1288 /* Mask all interrupts */ 1396 for (i = 0; i < priv->num_grps; i++) {
1289 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1397 regs = priv->gfargrp[i].regs;
1398 /* Mask all interrupts */
1399 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1290 1400
1291 /* Clear all interrupts */ 1401 /* Clear all interrupts */
1292 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); 1402 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1403 }
1293 1404
1405 regs = priv->gfargrp[0].regs;
1294 /* Stop the DMA, and wait for it to stop */ 1406 /* Stop the DMA, and wait for it to stop */
1295 tempval = gfar_read(&regs->dmactrl); 1407 tempval = gfar_read(&regs->dmactrl);
1296 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) 1408 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
@@ -1308,7 +1420,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
1308void gfar_halt(struct net_device *dev) 1420void gfar_halt(struct net_device *dev)
1309{ 1421{
1310 struct gfar_private *priv = netdev_priv(dev); 1422 struct gfar_private *priv = netdev_priv(dev);
1311 struct gfar __iomem *regs = priv->gfargrp.regs; 1423 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1312 u32 tempval; 1424 u32 tempval;
1313 1425
1314 gfar_halt_nodisable(dev); 1426 gfar_halt_nodisable(dev);
@@ -1319,10 +1431,18 @@ void gfar_halt(struct net_device *dev)
1319 gfar_write(&regs->maccfg1, tempval); 1431 gfar_write(&regs->maccfg1, tempval);
1320} 1432}
1321 1433
1434static void free_grp_irqs(struct gfar_priv_grp *grp)
1435{
1436 free_irq(grp->interruptError, grp);
1437 free_irq(grp->interruptTransmit, grp);
1438 free_irq(grp->interruptReceive, grp);
1439}
1440
1322void stop_gfar(struct net_device *dev) 1441void stop_gfar(struct net_device *dev)
1323{ 1442{
1324 struct gfar_private *priv = netdev_priv(dev); 1443 struct gfar_private *priv = netdev_priv(dev);
1325 unsigned long flags; 1444 unsigned long flags;
1445 int i;
1326 1446
1327 phy_stop(priv->phydev); 1447 phy_stop(priv->phydev);
1328 1448
@@ -1340,11 +1460,12 @@ void stop_gfar(struct net_device *dev)
1340 1460
1341 /* Free the IRQs */ 1461 /* Free the IRQs */
1342 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1462 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1343 free_irq(priv->gfargrp.interruptError, &priv->gfargrp); 1463 for (i = 0; i < priv->num_grps; i++)
1344 free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp); 1464 free_grp_irqs(&priv->gfargrp[i]);
1345 free_irq(priv->gfargrp.interruptReceive, &priv->gfargrp);
1346 } else { 1465 } else {
1347 free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp); 1466 for (i = 0; i < priv->num_grps; i++)
1467 free_irq(priv->gfargrp[i].interruptTransmit,
1468 &priv->gfargrp[i]);
1348 } 1469 }
1349 1470
1350 free_skb_resources(priv); 1471 free_skb_resources(priv);
@@ -1432,8 +1553,9 @@ static void free_skb_resources(struct gfar_private *priv)
1432void gfar_start(struct net_device *dev) 1553void gfar_start(struct net_device *dev)
1433{ 1554{
1434 struct gfar_private *priv = netdev_priv(dev); 1555 struct gfar_private *priv = netdev_priv(dev);
1435 struct gfar __iomem *regs = priv->gfargrp.regs; 1556 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1436 u32 tempval; 1557 u32 tempval;
1558 int i = 0;
1437 1559
1438 /* Enable Rx and Tx in MACCFG1 */ 1560 /* Enable Rx and Tx in MACCFG1 */
1439 tempval = gfar_read(&regs->maccfg1); 1561 tempval = gfar_read(&regs->maccfg1);
@@ -1450,92 +1572,149 @@ void gfar_start(struct net_device *dev)
1450 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 1572 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1451 gfar_write(&regs->dmactrl, tempval); 1573 gfar_write(&regs->dmactrl, tempval);
1452 1574
1453 /* Clear THLT/RHLT, so that the DMA starts polling now */ 1575 for (i = 0; i < priv->num_grps; i++) {
1454 gfar_write(&regs->tstat, priv->gfargrp.tstat); 1576 regs = priv->gfargrp[i].regs;
1455 gfar_write(&regs->rstat, priv->gfargrp.rstat); 1577 /* Clear THLT/RHLT, so that the DMA starts polling now */
1456 1578 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1457 /* Unmask the interrupts we look for */ 1579 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1458 gfar_write(&regs->imask, IMASK_DEFAULT); 1580 /* Unmask the interrupts we look for */
1581 gfar_write(&regs->imask, IMASK_DEFAULT);
1582 }
1459 1583
1460 dev->trans_start = jiffies; 1584 dev->trans_start = jiffies;
1461} 1585}
1462 1586
1463/* Bring the controller up and running */ 1587void gfar_configure_coalescing(struct gfar_private *priv,
1464int startup_gfar(struct net_device *ndev) 1588 unsigned int tx_mask, unsigned int rx_mask)
1465{ 1589{
1466 struct gfar_private *priv = netdev_priv(ndev); 1590 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1467 struct gfar __iomem *regs = priv->gfargrp.regs; 1591 u32 *baddr;
1468 int err; 1592 int i = 0;
1469 1593
1470 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1594 /* Backward compatible case ---- even if we enable
1595 * multiple queues, there's only single reg to program
1596 */
1597 gfar_write(&regs->txic, 0);
1598 if(likely(priv->tx_queue[0]->txcoalescing))
1599 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1471 1600
1472 err = gfar_alloc_skb_resources(ndev); 1601 gfar_write(&regs->rxic, 0);
1473 if (err) 1602 if(unlikely(priv->rx_queue[0]->rxcoalescing))
1474 return err; 1603 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1475 1604
1476 gfar_init_mac(ndev); 1605 if (priv->mode == MQ_MG_MODE) {
1606 baddr = &regs->txic0;
1607 for_each_bit (i, &tx_mask, priv->num_tx_queues) {
1608 if (likely(priv->tx_queue[i]->txcoalescing)) {
1609 gfar_write(baddr + i, 0);
1610 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1611 }
1612 }
1613
1614 baddr = &regs->rxic0;
1615 for_each_bit (i, &rx_mask, priv->num_rx_queues) {
1616 if (likely(priv->rx_queue[i]->rxcoalescing)) {
1617 gfar_write(baddr + i, 0);
1618 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1619 }
1620 }
1621 }
1622}
1623
1624static int register_grp_irqs(struct gfar_priv_grp *grp)
1625{
1626 struct gfar_private *priv = grp->priv;
1627 struct net_device *dev = priv->ndev;
1628 int err;
1477 1629
1478 /* If the device has multiple interrupts, register for 1630 /* If the device has multiple interrupts, register for
1479 * them. Otherwise, only register for the one */ 1631 * them. Otherwise, only register for the one */
1480 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1632 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1481 /* Install our interrupt handlers for Error, 1633 /* Install our interrupt handlers for Error,
1482 * Transmit, and Receive */ 1634 * Transmit, and Receive */
1483 err = request_irq(priv->gfargrp.interruptError, gfar_error, 0, 1635 if ((err = request_irq(grp->interruptError, gfar_error, 0,
1484 priv->gfargrp.int_name_er, &priv->gfargrp); 1636 grp->int_name_er,grp)) < 0) {
1485 if (err) {
1486 if (netif_msg_intr(priv)) 1637 if (netif_msg_intr(priv))
1487 pr_err("%s: Can't get IRQ %d\n", ndev->name, 1638 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1488 priv->gfargrp.interruptError); 1639 dev->name, grp->interruptError);
1489 goto err_irq_fail; 1640
1641 goto err_irq_fail;
1490 } 1642 }
1491 1643
1492 err = request_irq(priv->gfargrp.interruptTransmit, 1644 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1493 gfar_transmit, 0, 1645 0, grp->int_name_tx, grp)) < 0) {
1494 priv->gfargrp.int_name_tx,
1495 &priv->gfargrp);
1496 if (err) {
1497 if (netif_msg_intr(priv)) 1646 if (netif_msg_intr(priv))
1498 pr_err("%s: Can't get IRQ %d\n", ndev->name, 1647 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1499 priv->gfargrp.interruptTransmit); 1648 dev->name, grp->interruptTransmit);
1500 goto tx_irq_fail; 1649 goto tx_irq_fail;
1501 } 1650 }
1502 1651
1503 err = request_irq(priv->gfargrp.interruptReceive, 1652 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1504 gfar_receive, 0, 1653 grp->int_name_rx, grp)) < 0) {
1505 priv->gfargrp.int_name_rx,
1506 &priv->gfargrp);
1507 if (err) {
1508 if (netif_msg_intr(priv)) 1654 if (netif_msg_intr(priv))
1509 pr_err("%s: Can't get IRQ %d (receive0)\n", 1655 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1510 ndev->name, 1656 dev->name, grp->interruptReceive);
1511 priv->gfargrp.interruptReceive);
1512 goto rx_irq_fail; 1657 goto rx_irq_fail;
1513 } 1658 }
1514 } else { 1659 } else {
1515 err = request_irq(priv->gfargrp.interruptTransmit, 1660 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1516 gfar_interrupt, 0, 1661 grp->int_name_tx, grp)) < 0) {
1517 priv->gfargrp.int_name_tx,
1518 &priv->gfargrp);
1519 if (err) {
1520 if (netif_msg_intr(priv)) 1662 if (netif_msg_intr(priv))
1521 pr_err("%s: Can't get IRQ %d\n", ndev->name, 1663 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1522 priv->gfargrp.interruptTransmit); 1664 dev->name, grp->interruptTransmit);
1523 goto err_irq_fail; 1665 goto err_irq_fail;
1524 } 1666 }
1525 } 1667 }
1526 1668
1669 return 0;
1670
1671rx_irq_fail:
1672 free_irq(grp->interruptTransmit, grp);
1673tx_irq_fail:
1674 free_irq(grp->interruptError, grp);
1675err_irq_fail:
1676 return err;
1677
1678}
1679
1680/* Bring the controller up and running */
1681int startup_gfar(struct net_device *ndev)
1682{
1683 struct gfar_private *priv = netdev_priv(ndev);
1684 struct gfar __iomem *regs = NULL;
1685 int err, i, j;
1686
1687 for (i = 0; i < priv->num_grps; i++) {
1688 regs= priv->gfargrp[i].regs;
1689 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1690 }
1691
1692 regs= priv->gfargrp[0].regs;
1693 err = gfar_alloc_skb_resources(ndev);
1694 if (err)
1695 return err;
1696
1697 gfar_init_mac(ndev);
1698
1699 for (i = 0; i < priv->num_grps; i++) {
1700 err = register_grp_irqs(&priv->gfargrp[i]);
1701 if (err) {
1702 for (j = 0; j < i; j++)
1703 free_grp_irqs(&priv->gfargrp[j]);
1704 goto irq_fail;
1705 }
1706 }
1707
1527 /* Start the controller */ 1708 /* Start the controller */
1528 gfar_start(ndev); 1709 gfar_start(ndev);
1529 1710
1530 phy_start(priv->phydev); 1711 phy_start(priv->phydev);
1531 1712
1713 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1714
1532 return 0; 1715 return 0;
1533 1716
1534rx_irq_fail: 1717irq_fail:
1535 free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
1536tx_irq_fail:
1537 free_irq(priv->gfargrp.interruptError, &priv->gfargrp);
1538err_irq_fail:
1539 free_skb_resources(priv); 1718 free_skb_resources(priv);
1540 return err; 1719 return err;
1541} 1720}
@@ -1547,7 +1726,7 @@ static int gfar_enet_open(struct net_device *dev)
1547 struct gfar_private *priv = netdev_priv(dev); 1726 struct gfar_private *priv = netdev_priv(dev);
1548 int err; 1727 int err;
1549 1728
1550 napi_enable(&priv->gfargrp.napi); 1729 enable_napi(priv);
1551 1730
1552 skb_queue_head_init(&priv->rx_recycle); 1731 skb_queue_head_init(&priv->rx_recycle);
1553 1732
@@ -1559,13 +1738,13 @@ static int gfar_enet_open(struct net_device *dev)
1559 err = init_phy(dev); 1738 err = init_phy(dev);
1560 1739
1561 if (err) { 1740 if (err) {
1562 napi_disable(&priv->gfargrp.napi); 1741 disable_napi(priv);
1563 return err; 1742 return err;
1564 } 1743 }
1565 1744
1566 err = startup_gfar(dev); 1745 err = startup_gfar(dev);
1567 if (err) { 1746 if (err) {
1568 napi_disable(&priv->gfargrp.napi); 1747 disable_napi(priv);
1569 return err; 1748 return err;
1570 } 1749 }
1571 1750
@@ -1654,7 +1833,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1654 tx_queue = priv->tx_queue[rq]; 1833 tx_queue = priv->tx_queue[rq];
1655 txq = netdev_get_tx_queue(dev, rq); 1834 txq = netdev_get_tx_queue(dev, rq);
1656 base = tx_queue->tx_bd_base; 1835 base = tx_queue->tx_bd_base;
1657 regs = priv->gfargrp.regs; 1836 regs = tx_queue->grp->regs;
1658 1837
1659 /* make space for additional header when fcb is needed */ 1838 /* make space for additional header when fcb is needed */
1660 if (((skb->ip_summed == CHECKSUM_PARTIAL) || 1839 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -1791,7 +1970,7 @@ static int gfar_close(struct net_device *dev)
1791{ 1970{
1792 struct gfar_private *priv = netdev_priv(dev); 1971 struct gfar_private *priv = netdev_priv(dev);
1793 1972
1794 napi_disable(&priv->gfargrp.napi); 1973 disable_napi(priv);
1795 1974
1796 skb_queue_purge(&priv->rx_recycle); 1975 skb_queue_purge(&priv->rx_recycle);
1797 cancel_work_sync(&priv->reset_task); 1976 cancel_work_sync(&priv->reset_task);
@@ -1824,7 +2003,7 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1824 unsigned long flags; 2003 unsigned long flags;
1825 u32 tempval; 2004 u32 tempval;
1826 2005
1827 regs = priv->gfargrp.regs; 2006 regs = priv->gfargrp[0].regs;
1828 local_irq_save(flags); 2007 local_irq_save(flags);
1829 lock_rx_qs(priv); 2008 lock_rx_qs(priv);
1830 2009
@@ -1868,7 +2047,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1868{ 2047{
1869 int tempsize, tempval; 2048 int tempsize, tempval;
1870 struct gfar_private *priv = netdev_priv(dev); 2049 struct gfar_private *priv = netdev_priv(dev);
1871 struct gfar __iomem *regs = priv->gfargrp.regs; 2050 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1872 int oldsize = priv->rx_buffer_size; 2051 int oldsize = priv->rx_buffer_size;
1873 int frame_size = new_mtu + ETH_HLEN; 2052 int frame_size = new_mtu + ETH_HLEN;
1874 2053
@@ -2290,7 +2469,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2290 struct gfar_priv_grp *gfargrp = container_of(napi, 2469 struct gfar_priv_grp *gfargrp = container_of(napi,
2291 struct gfar_priv_grp, napi); 2470 struct gfar_priv_grp, napi);
2292 struct gfar_private *priv = gfargrp->priv; 2471 struct gfar_private *priv = gfargrp->priv;
2293 struct gfar __iomem *regs = priv->gfargrp.regs; 2472 struct gfar __iomem *regs = gfargrp->regs;
2294 struct gfar_priv_tx_q *tx_queue = NULL; 2473 struct gfar_priv_tx_q *tx_queue = NULL;
2295 struct gfar_priv_rx_q *rx_queue = NULL; 2474 struct gfar_priv_rx_q *rx_queue = NULL;
2296 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; 2475 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
@@ -2349,14 +2528,8 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2349 2528
2350 /* If we are coalescing interrupts, update the timer */ 2529 /* If we are coalescing interrupts, update the timer */
2351 /* Otherwise, clear it */ 2530 /* Otherwise, clear it */
2352 if (likely(rx_queue->rxcoalescing)) { 2531 gfar_configure_coalescing(priv,
2353 gfar_write(&regs->rxic, 0); 2532 gfargrp->rx_bit_map, gfargrp->tx_bit_map);
2354 gfar_write(&regs->rxic, rx_queue->rxic);
2355 }
2356 if (likely(tx_queue->txcoalescing)) {
2357 gfar_write(&regs->txic, 0);
2358 gfar_write(&regs->txic, tx_queue->txic);
2359 }
2360 } 2533 }
2361 2534
2362 return rx_cleaned; 2535 return rx_cleaned;
@@ -2371,20 +2544,26 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2371static void gfar_netpoll(struct net_device *dev) 2544static void gfar_netpoll(struct net_device *dev)
2372{ 2545{
2373 struct gfar_private *priv = netdev_priv(dev); 2546 struct gfar_private *priv = netdev_priv(dev);
2547 int i = 0;
2374 2548
2375 /* If the device has multiple interrupts, run tx/rx */ 2549 /* If the device has multiple interrupts, run tx/rx */
2376 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2550 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2377 disable_irq(priv->gfargrp.interruptTransmit); 2551 for (i = 0; i < priv->num_grps; i++) {
2378 disable_irq(priv->gfargrp.interruptReceive); 2552 disable_irq(priv->gfargrp[i].interruptTransmit);
2379 disable_irq(priv->gfargrp.interruptError); 2553 disable_irq(priv->gfargrp[i].interruptReceive);
2380 gfar_interrupt(priv->gfargrp.interruptTransmit, &priv->gfargrp); 2554 disable_irq(priv->gfargrp[i].interruptError);
2381 enable_irq(priv->gfargrp.interruptError); 2555 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2382 enable_irq(priv->gfargrp.interruptReceive); 2556 &priv->gfargrp[i]);
2383 enable_irq(priv->gfargrp.interruptTransmit); 2557 enable_irq(priv->gfargrp[i].interruptError);
2558 enable_irq(priv->gfargrp[i].interruptReceive);
2559 enable_irq(priv->gfargrp[i].interruptTransmit);
2560 }
2384 } else { 2561 } else {
2385 disable_irq(priv->gfargrp.interruptTransmit); 2562 for (i = 0; i < priv->num_grps; i++) {
2386 gfar_interrupt(priv->gfargrp.interruptTransmit, &priv->gfargrp); 2563 disable_irq(priv->gfargrp[i].interruptTransmit);
2387 enable_irq(priv->gfargrp.interruptTransmit); 2564 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2565 &priv->gfargrp[i]);
2566 enable_irq(priv->gfargrp[i].interruptTransmit);
2388 } 2567 }
2389} 2568}
2390#endif 2569#endif
@@ -2421,7 +2600,7 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2421static void adjust_link(struct net_device *dev) 2600static void adjust_link(struct net_device *dev)
2422{ 2601{
2423 struct gfar_private *priv = netdev_priv(dev); 2602 struct gfar_private *priv = netdev_priv(dev);
2424 struct gfar __iomem *regs = priv->gfargrp.regs; 2603 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2425 unsigned long flags; 2604 unsigned long flags;
2426 struct phy_device *phydev = priv->phydev; 2605 struct phy_device *phydev = priv->phydev;
2427 int new_state = 0; 2606 int new_state = 0;
@@ -2505,7 +2684,7 @@ static void gfar_set_multi(struct net_device *dev)
2505{ 2684{
2506 struct dev_mc_list *mc_ptr; 2685 struct dev_mc_list *mc_ptr;
2507 struct gfar_private *priv = netdev_priv(dev); 2686 struct gfar_private *priv = netdev_priv(dev);
2508 struct gfar __iomem *regs = priv->gfargrp.regs; 2687 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2509 u32 tempval; 2688 u32 tempval;
2510 2689
2511 if (dev->flags & IFF_PROMISC) { 2690 if (dev->flags & IFF_PROMISC) {
@@ -2638,7 +2817,7 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
2638static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) 2817static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2639{ 2818{
2640 struct gfar_private *priv = netdev_priv(dev); 2819 struct gfar_private *priv = netdev_priv(dev);
2641 struct gfar __iomem *regs = priv->gfargrp.regs; 2820 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2642 int idx; 2821 int idx;
2643 char tmpbuf[MAC_ADDR_LEN]; 2822 char tmpbuf[MAC_ADDR_LEN];
2644 u32 tempval; 2823 u32 tempval;
@@ -2742,6 +2921,9 @@ static struct of_device_id gfar_match[] =
2742 .type = "network", 2921 .type = "network",
2743 .compatible = "gianfar", 2922 .compatible = "gianfar",
2744 }, 2923 },
2924 {
2925 .compatible = "fsl,etsec2",
2926 },
2745 {}, 2927 {},
2746}; 2928};
2747MODULE_DEVICE_TABLE(of, gfar_match); 2929MODULE_DEVICE_TABLE(of, gfar_match);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 08518c205035..a2c1f963cdd6 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -79,6 +79,9 @@ extern const char gfar_driver_version[];
79#define MAX_TX_QS 0x8 79#define MAX_TX_QS 0x8
80#define MAX_RX_QS 0x8 80#define MAX_RX_QS 0x8
81 81
82/* MAXIMUM NUMBER OF GROUPS SUPPORTED */
83#define MAXGROUPS 0x2
84
82/* These need to be powers of 2 for this driver */ 85/* These need to be powers of 2 for this driver */
83#define DEFAULT_TX_RING_SIZE 256 86#define DEFAULT_TX_RING_SIZE 256
84#define DEFAULT_RX_RING_SIZE 256 87#define DEFAULT_RX_RING_SIZE 256
@@ -795,7 +798,24 @@ struct gfar {
795#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 798#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
796#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 799#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
797 800
801#if (MAXGROUPS == 2)
802#define DEFAULT_MAPPING 0xAA
803#else
798#define DEFAULT_MAPPING 0xFF 804#define DEFAULT_MAPPING 0xFF
805#endif
806
807#define ISRG_SHIFT_TX 0x10
808#define ISRG_SHIFT_RX 0x18
809
810/* The same driver can operate in two modes */
811/* SQ_SG_MODE: Single Queue Single Group Mode
812 * (Backward compatible mode)
813 * MQ_MG_MODE: Multi Queue Multi Group mode
814 */
815enum {
816 SQ_SG_MODE = 0,
817 MQ_MG_MODE
818};
799 819
800/** 820/**
801 * struct gfar_priv_tx_q - per tx queue structure 821 * struct gfar_priv_tx_q - per tx queue structure
@@ -825,6 +845,7 @@ struct gfar_priv_tx_q {
825 struct txbd8 *cur_tx; 845 struct txbd8 *cur_tx;
826 struct txbd8 *dirty_tx; 846 struct txbd8 *dirty_tx;
827 struct net_device *dev; 847 struct net_device *dev;
848 struct gfar_priv_grp *grp;
828 u16 skb_curtx; 849 u16 skb_curtx;
829 u16 skb_dirtytx; 850 u16 skb_dirtytx;
830 u16 qindex; 851 u16 qindex;
@@ -858,6 +879,7 @@ struct gfar_priv_rx_q {
858 struct rxbd8 *rx_bd_base; 879 struct rxbd8 *rx_bd_base;
859 struct rxbd8 *cur_rx; 880 struct rxbd8 *cur_rx;
860 struct net_device *dev; 881 struct net_device *dev;
882 struct gfar_priv_grp *grp;
861 u16 skb_currx; 883 u16 skb_currx;
862 u16 qindex; 884 u16 qindex;
863 unsigned int rx_ring_size; 885 unsigned int rx_ring_size;
@@ -885,6 +907,7 @@ struct gfar_priv_grp {
885 struct napi_struct napi; 907 struct napi_struct napi;
886 struct gfar_private *priv; 908 struct gfar_private *priv;
887 struct gfar __iomem *regs; 909 struct gfar __iomem *regs;
910 unsigned int grp_id;
888 unsigned int rx_bit_map; 911 unsigned int rx_bit_map;
889 unsigned int tx_bit_map; 912 unsigned int tx_bit_map;
890 unsigned int num_tx_queues; 913 unsigned int num_tx_queues;
@@ -916,6 +939,8 @@ struct gfar_private {
916 /* Indicates how many tx, rx queues are enabled */ 939 /* Indicates how many tx, rx queues are enabled */
917 unsigned int num_tx_queues; 940 unsigned int num_tx_queues;
918 unsigned int num_rx_queues; 941 unsigned int num_rx_queues;
942 unsigned int num_grps;
943 unsigned int mode;
919 944
920 /* The total tx and rx ring size for the enabled queues */ 945 /* The total tx and rx ring size for the enabled queues */
921 unsigned int total_tx_ring_size; 946 unsigned int total_tx_ring_size;
@@ -925,7 +950,7 @@ struct gfar_private {
925 struct net_device *ndev; 950 struct net_device *ndev;
926 struct of_device *ofdev; 951 struct of_device *ofdev;
927 952
928 struct gfar_priv_grp gfargrp; 953 struct gfar_priv_grp gfargrp[MAXGROUPS];
929 struct gfar_priv_tx_q *tx_queue[MAX_TX_QS]; 954 struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
930 struct gfar_priv_rx_q *rx_queue[MAX_RX_QS]; 955 struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
931 956
@@ -999,6 +1024,8 @@ extern void stop_gfar(struct net_device *dev);
999extern void gfar_halt(struct net_device *dev); 1024extern void gfar_halt(struct net_device *dev);
1000extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, 1025extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
1001 int enable, u32 regnum, u32 read); 1026 int enable, u32 regnum, u32 read);
1027extern void gfar_configure_coalescing(struct gfar_private *priv,
1028 unsigned int tx_mask, unsigned int rx_mask);
1002void gfar_init_sysfs(struct net_device *dev); 1029void gfar_init_sysfs(struct net_device *dev);
1003 1030
1004extern const struct ethtool_ops gfar_ethtool_ops; 1031extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index d3d26234f190..562f6c20f591 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -137,7 +137,7 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
137{ 137{
138 int i; 138 int i;
139 struct gfar_private *priv = netdev_priv(dev); 139 struct gfar_private *priv = netdev_priv(dev);
140 struct gfar __iomem *regs = priv->gfargrp.regs; 140 struct gfar __iomem *regs = priv->gfargrp[0].regs;
141 u64 *extra = (u64 *) & priv->extra_stats; 141 u64 *extra = (u64 *) & priv->extra_stats;
142 142
143 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 143 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
@@ -226,7 +226,7 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, voi
226{ 226{
227 int i; 227 int i;
228 struct gfar_private *priv = netdev_priv(dev); 228 struct gfar_private *priv = netdev_priv(dev);
229 u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp.regs; 229 u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
230 u32 *buf = (u32 *) regbuf; 230 u32 *buf = (u32 *) regbuf;
231 231
232 for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++) 232 for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
@@ -352,22 +352,23 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
352static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 352static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
353{ 353{
354 struct gfar_private *priv = netdev_priv(dev); 354 struct gfar_private *priv = netdev_priv(dev);
355 struct gfar __iomem *regs = priv->gfargrp.regs; 355 int i = 0;
356 struct gfar_priv_tx_q *tx_queue = NULL;
357 struct gfar_priv_rx_q *rx_queue = NULL;
358 356
359 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) 357 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
360 return -EOPNOTSUPP; 358 return -EOPNOTSUPP;
361 359
362 tx_queue = priv->tx_queue[0];
363 rx_queue = priv->rx_queue[0];
364
365 /* Set up rx coalescing */ 360 /* Set up rx coalescing */
361 /* As of now, we will enable/disable coalescing for all
362 * queues together in case of eTSEC2, this will be modified
363 * along with the ethtool interface */
366 if ((cvals->rx_coalesce_usecs == 0) || 364 if ((cvals->rx_coalesce_usecs == 0) ||
367 (cvals->rx_max_coalesced_frames == 0)) 365 (cvals->rx_max_coalesced_frames == 0)) {
368 rx_queue->rxcoalescing = 0; 366 for (i = 0; i < priv->num_rx_queues; i++)
369 else 367 priv->rx_queue[i]->rxcoalescing = 0;
370 rx_queue->rxcoalescing = 1; 368 } else {
369 for (i = 0; i < priv->num_rx_queues; i++)
370 priv->rx_queue[i]->rxcoalescing = 1;
371 }
371 372
372 if (NULL == priv->phydev) 373 if (NULL == priv->phydev)
373 return -ENODEV; 374 return -ENODEV;
@@ -385,15 +386,21 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
385 return -EINVAL; 386 return -EINVAL;
386 } 387 }
387 388
388 rx_queue->rxic = mk_ic_value(cvals->rx_max_coalesced_frames, 389 for (i = 0; i < priv->num_rx_queues; i++) {
389 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs)); 390 priv->rx_queue[i]->rxic = mk_ic_value(
391 cvals->rx_max_coalesced_frames,
392 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
393 }
390 394
391 /* Set up tx coalescing */ 395 /* Set up tx coalescing */
392 if ((cvals->tx_coalesce_usecs == 0) || 396 if ((cvals->tx_coalesce_usecs == 0) ||
393 (cvals->tx_max_coalesced_frames == 0)) 397 (cvals->tx_max_coalesced_frames == 0)) {
394 tx_queue->txcoalescing = 0; 398 for (i = 0; i < priv->num_tx_queues; i++)
395 else 399 priv->tx_queue[i]->txcoalescing = 0;
396 tx_queue->txcoalescing = 1; 400 } else {
401 for (i = 0; i < priv->num_tx_queues; i++)
402 priv->tx_queue[i]->txcoalescing = 1;
403 }
397 404
398 /* Check the bounds of the values */ 405 /* Check the bounds of the values */
399 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) { 406 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
@@ -408,16 +415,13 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
408 return -EINVAL; 415 return -EINVAL;
409 } 416 }
410 417
411 tx_queue->txic = mk_ic_value(cvals->tx_max_coalesced_frames, 418 for (i = 0; i < priv->num_tx_queues; i++) {
412 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); 419 priv->tx_queue[i]->txic = mk_ic_value(
413 420 cvals->tx_max_coalesced_frames,
414 gfar_write(&regs->rxic, 0); 421 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
415 if (rx_queue->rxcoalescing) 422 }
416 gfar_write(&regs->rxic, rx_queue->rxic);
417 423
418 gfar_write(&regs->txic, 0); 424 gfar_configure_coalescing(priv, 0xFF, 0xFF);
419 if (tx_queue->txcoalescing)
420 gfar_write(&regs->txic, tx_queue->txic);
421 425
422 return 0; 426 return 0;
423} 427}
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 4b726f61314e..3724835d2856 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -50,7 +50,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
50 const char *buf, size_t count) 50 const char *buf, size_t count)
51{ 51{
52 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 52 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
53 struct gfar __iomem *regs = priv->gfargrp.regs; 53 struct gfar __iomem *regs = priv->gfargrp[0].regs;
54 int new_setting = 0; 54 int new_setting = 0;
55 u32 temp; 55 u32 temp;
56 unsigned long flags; 56 unsigned long flags;
@@ -105,7 +105,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
105 const char *buf, size_t count) 105 const char *buf, size_t count)
106{ 106{
107 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 107 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
108 struct gfar __iomem *regs = priv->gfargrp.regs; 108 struct gfar __iomem *regs = priv->gfargrp[0].regs;
109 unsigned int length = simple_strtoul(buf, NULL, 0); 109 unsigned int length = simple_strtoul(buf, NULL, 0);
110 u32 temp; 110 u32 temp;
111 unsigned long flags; 111 unsigned long flags;
@@ -164,7 +164,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
164 const char *buf, size_t count) 164 const char *buf, size_t count)
165{ 165{
166 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 166 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
167 struct gfar __iomem *regs = priv->gfargrp.regs; 167 struct gfar __iomem *regs = priv->gfargrp[0].regs;
168 unsigned short index = simple_strtoul(buf, NULL, 0); 168 unsigned short index = simple_strtoul(buf, NULL, 0);
169 u32 temp; 169 u32 temp;
170 unsigned long flags; 170 unsigned long flags;
@@ -212,7 +212,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
212 const char *buf, size_t count) 212 const char *buf, size_t count)
213{ 213{
214 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 214 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
215 struct gfar __iomem *regs = priv->gfargrp.regs; 215 struct gfar __iomem *regs = priv->gfargrp[0].regs;
216 unsigned int length = simple_strtoul(buf, NULL, 0); 216 unsigned int length = simple_strtoul(buf, NULL, 0);
217 u32 temp; 217 u32 temp;
218 unsigned long flags; 218 unsigned long flags;
@@ -252,7 +252,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
252 const char *buf, size_t count) 252 const char *buf, size_t count)
253{ 253{
254 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 254 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
255 struct gfar __iomem *regs = priv->gfargrp.regs; 255 struct gfar __iomem *regs = priv->gfargrp[0].regs;
256 unsigned int num = simple_strtoul(buf, NULL, 0); 256 unsigned int num = simple_strtoul(buf, NULL, 0);
257 u32 temp; 257 u32 temp;
258 unsigned long flags; 258 unsigned long flags;
@@ -293,7 +293,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
293 const char *buf, size_t count) 293 const char *buf, size_t count)
294{ 294{
295 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 295 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
296 struct gfar __iomem *regs = priv->gfargrp.regs; 296 struct gfar __iomem *regs = priv->gfargrp[0].regs;
297 unsigned int num = simple_strtoul(buf, NULL, 0); 297 unsigned int num = simple_strtoul(buf, NULL, 0);
298 u32 temp; 298 u32 temp;
299 unsigned long flags; 299 unsigned long flags;