author		David S. Miller <davem@sunset.davemloft.net>	2006-06-24 00:23:01 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2006-06-24 02:16:04 -0400
commit		ecba38abfdaf012713b661177c2a6db12bb996c3 (patch)
tree		66803ba6b032d3ebd41bba37b3979fc0b056b377 /drivers/net
parent		52a34c7fe43afcbcd0e1ecfdef6b3dfdb9a90180 (diff)
[NET] sunqe: Convert to new SBUS driver layer.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/sunqe.c	468
1 file changed, 219 insertions, 249 deletions
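The conversion below replaces the old bus-walking probe (for_each_sbus()/for_each_sbusdev() plus a hand-rolled qec_match()) with an of_platform_driver registered on sbus_bus_type, so the driver core calls probe()/remove() once per "qe" node and each channel becomes an independently probed device. The following is a condensed sketch of that registration skeleton only, not the patch itself: the qe_sketch_* names are placeholders, the header list assumes the usual sparc headers of that era, and the real per-channel work (register mapping, DMA buffer allocation, netdev registration) is reduced to comments. See qec_ether_init() and qec_sbus_remove() in the diff for the actual implementation.

/* Condensed sketch of the new-style SBUS driver registration; error
 * handling and hardware setup are elided.  qe_sketch_* identifiers are
 * illustrative placeholders, not symbols from the patch.
 */
#include <linux/module.h>
#include <asm/sbus.h>		/* struct sbus_dev, sbus_bus_type (sparc) */
#include <asm/of_device.h>	/* struct of_device, of_platform_driver */

static int __devinit qe_sketch_probe(struct of_device *dev,
				     const struct of_device_id *match)
{
	struct sbus_dev *sdev = to_sbus_device(&dev->dev);

	/* One "qe" channel per call: map its registers, allocate the DMA
	 * descriptor block, register the net_device, then stash the private
	 * state with dev_set_drvdata() so remove() can find it again.
	 */
	(void) sdev;
	return 0;
}

static int __devexit qe_sketch_remove(struct of_device *dev)
{
	/* Undo everything probe() did for this one channel. */
	return 0;
}

static struct of_device_id qe_sketch_match[] = {
	{ .name = "qe" },	/* match each QE channel node, not the parent "qec" */
	{},
};
MODULE_DEVICE_TABLE(of, qe_sketch_match);

static struct of_platform_driver qe_sketch_driver = {
	.name		= "qec",
	.match_table	= qe_sketch_match,
	.probe		= qe_sketch_probe,
	.remove		= __devexit_p(qe_sketch_remove),
};

static int __init qe_sketch_init(void)
{
	/* Bind against the SBUS bus type; the core now does the walking
	 * that the old qec_probe() did by hand.
	 */
	return of_register_driver(&qe_sketch_driver, &sbus_bus_type);
}

static void __exit qe_sketch_exit(void)
{
	of_unregister_driver(&qe_sketch_driver);
}

module_init(qe_sketch_init);
module_exit(qe_sketch_exit);

Note that the module-level root_qec_dev list and the shared QEC parent state survive the conversion, which is why the patch's get_qec() lazily creates one struct sunqec per parent "qec" node the first time any of its four channels is probed.
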
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 1f2323be60d4..9da6d5b87173 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -1,10 +1,9 @@
-/* $Id: sunqe.c,v 1.55 2002/01/15 06:48:55 davem Exp $
- * sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
+/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
  * Once again I am out to prove that every ethernet
  * controller out there can be most efficiently programmed
  * if you make it look like a LANCE.
  *
- * Copyright (C) 1996, 1999, 2003 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1999, 2003, 2006 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/module.h>
@@ -41,9 +40,9 @@
 #include "sunqe.h"
 
 #define DRV_NAME	"sunqe"
-#define DRV_VERSION	"3.0"
-#define DRV_RELDATE	"8/24/03"
-#define DRV_AUTHOR	"David S. Miller (davem@redhat.com)"
+#define DRV_VERSION	"4.0"
+#define DRV_RELDATE	"June 23, 2006"
+#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"
 
 static char version[] =
 	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
@@ -755,298 +754,269 @@ static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
 		    qecp->gregs + GLOB_RSIZE);
 }
 
-/* Four QE's per QEC card. */
-static int __init qec_ether_init(struct net_device *dev, struct sbus_dev *sdev)
+static u8 __init qec_get_burst(struct device_node *dp)
 {
-	static unsigned version_printed;
-	struct net_device *qe_devs[4];
-	struct sunqe *qeps[4];
-	struct sbus_dev *qesdevs[4];
-	struct sbus_dev *child;
-	struct sunqec *qecp = NULL;
 	u8 bsizes, bsizes_more;
-	int i, j, res = -ENOMEM;
 
-	for (i = 0; i < 4; i++) {
-		qe_devs[i] = alloc_etherdev(sizeof(struct sunqe));
-		if (!qe_devs[i])
-			goto out;
-	}
+	/* Find and set the burst sizes for the QEC, since it
+	 * does the actual dma for all 4 channels.
+	 */
+	bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
+	bsizes &= 0xff;
+	bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);
 
-	if (version_printed++ == 0)
-		printk(KERN_INFO "%s", version);
+	if (bsizes_more != 0xff)
+		bsizes &= bsizes_more;
+	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
+	    (bsizes & DMA_BURST32)==0)
+		bsizes = (DMA_BURST32 - 1);
 
-	for (i = 0; i < 4; i++) {
-		qeps[i] = (struct sunqe *) qe_devs[i]->priv;
-		for (j = 0; j < 6; j++)
-			qe_devs[i]->dev_addr[j] = idprom->id_ethaddr[j];
-		qeps[i]->channel = i;
-		spin_lock_init(&qeps[i]->lock);
-	}
+	return bsizes;
+}
 
-	qecp = kmalloc(sizeof(struct sunqec), GFP_KERNEL);
-	if (qecp == NULL)
-		goto out1;
-	qecp->qec_sdev = sdev;
+static struct sunqec * __init get_qec(struct sbus_dev *child_sdev)
+{
+	struct sbus_dev *qec_sdev = child_sdev->parent;
+	struct sunqec *qecp;
 
-	for (i = 0; i < 4; i++) {
-		qecp->qes[i] = qeps[i];
-		qeps[i]->dev = qe_devs[i];
-		qeps[i]->parent = qecp;
+	for (qecp = root_qec_dev; qecp; qecp = qecp->next_module) {
+		if (qecp->qec_sdev == qec_sdev)
+			break;
 	}
+	if (!qecp) {
+		qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
+		if (qecp) {
+			u32 ctrl;
+
+			qecp->qec_sdev = qec_sdev;
+			qecp->gregs = sbus_ioremap(&qec_sdev->resource[0], 0,
+						   GLOB_REG_SIZE,
+						   "QEC Global Registers");
+			if (!qecp->gregs)
+				goto fail;
+
+			/* Make sure the QEC is in MACE mode. */
+			ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
+			ctrl &= 0xf0000000;
+			if (ctrl != GLOB_CTRL_MMODE) {
+				printk(KERN_ERR "qec: Not in MACE mode!\n");
+				goto fail;
+			}
 
-	res = -ENODEV;
+			if (qec_global_reset(qecp->gregs))
+				goto fail;
 
-	for (i = 0, child = sdev->child; i < 4; i++, child = child->next) {
-		/* Link in channel */
-		j = prom_getintdefault(child->prom_node, "channel#", -1);
-		if (j == -1)
-			goto out2;
-		qesdevs[j] = child;
-	}
+			qecp->qec_bursts = qec_get_burst(qec_sdev->ofdev.node);
 
-	for (i = 0; i < 4; i++)
-		qeps[i]->qe_sdev = qesdevs[i];
+			qec_init_once(qecp, qec_sdev);
 
-	/* Now map in the registers, QEC globals first. */
-	qecp->gregs = sbus_ioremap(&sdev->resource[0], 0,
-				   GLOB_REG_SIZE, "QEC Global Registers");
-	if (!qecp->gregs) {
-		printk(KERN_ERR "QuadEther: Cannot map QEC global registers.\n");
-		goto out2;
-	}
+			if (request_irq(qec_sdev->irqs[0], &qec_interrupt,
+					SA_SHIRQ, "qec", (void *) qecp)) {
+				printk(KERN_ERR "qec: Can't register irq.\n");
+				goto fail;
+			}
 
-	/* Make sure the QEC is in MACE mode. */
-	if ((sbus_readl(qecp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_MMODE) {
-		printk(KERN_ERR "QuadEther: AIEEE, QEC is not in MACE mode!\n");
-		goto out3;
+			qecp->next_module = root_qec_dev;
+			root_qec_dev = qecp;
+		}
 	}
 
-	/* Reset the QEC. */
-	if (qec_global_reset(qecp->gregs))
-		goto out3;
+	return qecp;
 
-	/* Find and set the burst sizes for the QEC, since it does
-	 * the actual dma for all 4 channels.
-	 */
-	bsizes = prom_getintdefault(sdev->prom_node, "burst-sizes", 0xff);
-	bsizes &= 0xff;
-	bsizes_more = prom_getintdefault(sdev->bus->prom_node, "burst-sizes", 0xff);
+fail:
+	if (qecp->gregs)
+		sbus_iounmap(qecp->gregs, GLOB_REG_SIZE);
+	kfree(qecp);
+	return NULL;
+}
 
-	if (bsizes_more != 0xff)
-		bsizes &= bsizes_more;
-	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
-	    (bsizes & DMA_BURST32)==0)
-		bsizes = (DMA_BURST32 - 1);
+static int __init qec_ether_init(struct sbus_dev *sdev)
+{
+	static unsigned version_printed;
+	struct net_device *dev;
+	struct sunqe *qe;
+	struct sunqec *qecp;
+	int i, res;
 
-	qecp->qec_bursts = bsizes;
+	if (version_printed++ == 0)
+		printk(KERN_INFO "%s", version);
 
-	/* Perform one time QEC initialization, we never touch the QEC
-	 * globals again after this.
-	 */
-	qec_init_once(qecp, sdev);
-
-	for (i = 0; i < 4; i++) {
-		struct sunqe *qe = qeps[i];
-		/* Map in QEC per-channel control registers. */
-		qe->qcregs = sbus_ioremap(&qe->qe_sdev->resource[0], 0,
-					  CREG_REG_SIZE, "QEC Channel Registers");
-		if (!qe->qcregs) {
-			printk(KERN_ERR "QuadEther: Cannot map QE %d's channel registers.\n", i);
-			goto out4;
-		}
+	dev = alloc_etherdev(sizeof(struct sunqe));
+	if (!dev)
+		return -ENOMEM;
 
-		/* Map in per-channel AMD MACE registers. */
-		qe->mregs = sbus_ioremap(&qe->qe_sdev->resource[1], 0,
-					 MREGS_REG_SIZE, "QE MACE Registers");
-		if (!qe->mregs) {
-			printk(KERN_ERR "QuadEther: Cannot map QE %d's MACE registers.\n", i);
-			goto out4;
-		}
+	qe = netdev_priv(dev);
 
-		qe->qe_block = sbus_alloc_consistent(qe->qe_sdev,
-						     PAGE_SIZE,
-						     &qe->qblock_dvma);
-		qe->buffers = sbus_alloc_consistent(qe->qe_sdev,
-						    sizeof(struct sunqe_buffers),
-						    &qe->buffers_dvma);
-		if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
-		    qe->buffers == NULL || qe->buffers_dvma == 0) {
-			goto out4;
+	i = of_getintprop_default(sdev->ofdev.node, "channel#", -1);
+	if (i == -1) {
+		struct sbus_dev *td = sdev->parent->child;
+		i = 0;
+		while (td != sdev) {
+			td = td->next;
+			i++;
 		}
-
-		/* Stop this QE. */
-		qe_stop(qe);
 	}
+	qe->channel = i;
+	spin_lock_init(&qe->lock);
+
+	res = -ENODEV;
+	qecp = get_qec(sdev);
+	if (!qecp)
+		goto fail;
 
-	for (i = 0; i < 4; i++) {
-		SET_MODULE_OWNER(qe_devs[i]);
-		qe_devs[i]->open = qe_open;
-		qe_devs[i]->stop = qe_close;
-		qe_devs[i]->hard_start_xmit = qe_start_xmit;
-		qe_devs[i]->get_stats = qe_get_stats;
-		qe_devs[i]->set_multicast_list = qe_set_multicast;
-		qe_devs[i]->tx_timeout = qe_tx_timeout;
-		qe_devs[i]->watchdog_timeo = 5*HZ;
-		qe_devs[i]->irq = sdev->irqs[0];
-		qe_devs[i]->dma = 0;
-		qe_devs[i]->ethtool_ops = &qe_ethtool_ops;
-	}
+	qecp->qes[qe->channel] = qe;
+	qe->dev = dev;
+	qe->parent = qecp;
+	qe->qe_sdev = sdev;
 
-	/* QEC receives interrupts from each QE, then it sends the actual
-	 * IRQ to the cpu itself.  Since QEC is the single point of
-	 * interrupt for all QE channels we register the IRQ handler
-	 * for it now.
-	 */
-	if (request_irq(sdev->irqs[0], &qec_interrupt,
-			SA_SHIRQ, "QuadEther", (void *) qecp)) {
-		printk(KERN_ERR "QuadEther: Can't register QEC master irq handler.\n");
-		res = -EAGAIN;
-		goto out4;
+	res = -ENOMEM;
+	qe->qcregs = sbus_ioremap(&qe->qe_sdev->resource[0], 0,
+				  CREG_REG_SIZE, "QEC Channel Registers");
+	if (!qe->qcregs) {
+		printk(KERN_ERR "qe: Cannot map channel registers.\n");
+		goto fail;
 	}
 
-	for (i = 0; i < 4; i++) {
-		if (register_netdev(qe_devs[i]) != 0)
-			goto out5;
+	qe->mregs = sbus_ioremap(&qe->qe_sdev->resource[1], 0,
+				 MREGS_REG_SIZE, "QE MACE Registers");
+	if (!qe->mregs) {
+		printk(KERN_ERR "qe: Cannot map MACE registers.\n");
+		goto fail;
 	}
 
-	/* Report the QE channels. */
-	for (i = 0; i < 4; i++) {
-		printk(KERN_INFO "%s: QuadEthernet channel[%d] ", qe_devs[i]->name, i);
-		for (j = 0; j < 6; j++)
-			printk ("%2.2x%c",
-				qe_devs[i]->dev_addr[j],
-				j == 5 ? ' ': ':');
-		printk("\n");
-	}
+	qe->qe_block = sbus_alloc_consistent(qe->qe_sdev,
+					     PAGE_SIZE,
+					     &qe->qblock_dvma);
+	qe->buffers = sbus_alloc_consistent(qe->qe_sdev,
+					    sizeof(struct sunqe_buffers),
+					    &qe->buffers_dvma);
+	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
+	    qe->buffers == NULL || qe->buffers_dvma == 0)
+		goto fail;
+
+	/* Stop this QE. */
+	qe_stop(qe);
+
+	SET_MODULE_OWNER(dev);
+	SET_NETDEV_DEV(dev, &sdev->ofdev.dev);
+
+	dev->open = qe_open;
+	dev->stop = qe_close;
+	dev->hard_start_xmit = qe_start_xmit;
+	dev->get_stats = qe_get_stats;
+	dev->set_multicast_list = qe_set_multicast;
+	dev->tx_timeout = qe_tx_timeout;
+	dev->watchdog_timeo = 5*HZ;
+	dev->irq = sdev->irqs[0];
+	dev->dma = 0;
+	dev->ethtool_ops = &qe_ethtool_ops;
+
+	res = register_netdev(dev);
+	if (res)
+		goto fail;
+
+	dev_set_drvdata(&sdev->ofdev.dev, qe);
+
+	printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel);
+	for (i = 0; i < 6; i++)
+		printk ("%2.2x%c",
+			dev->dev_addr[i],
+			i == 5 ? ' ': ':');
+	printk("\n");
 
-	/* We are home free at this point, link the qe's into
-	 * the master list for later driver exit.
-	 */
-	qecp->next_module = root_qec_dev;
-	root_qec_dev = qecp;
 
 	return 0;
 
-out5:
-	while (i--)
-		unregister_netdev(qe_devs[i]);
-	free_irq(sdev->irqs[0], (void *)qecp);
-out4:
-	for (i = 0; i < 4; i++) {
-		struct sunqe *qe = (struct sunqe *)qe_devs[i]->priv;
-
-		if (qe->qcregs)
-			sbus_iounmap(qe->qcregs, CREG_REG_SIZE);
-		if (qe->mregs)
-			sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
-		if (qe->qe_block)
-			sbus_free_consistent(qe->qe_sdev,
-					     PAGE_SIZE,
-					     qe->qe_block,
-					     qe->qblock_dvma);
-		if (qe->buffers)
-			sbus_free_consistent(qe->qe_sdev,
-					     sizeof(struct sunqe_buffers),
-					     qe->buffers,
-					     qe->buffers_dvma);
-	}
-out3:
-	sbus_iounmap(qecp->gregs, GLOB_REG_SIZE);
-out2:
-	kfree(qecp);
-out1:
-	i = 4;
-out:
-	while (i--)
-		free_netdev(qe_devs[i]);
+fail:
+	if (qe->qcregs)
+		sbus_iounmap(qe->qcregs, CREG_REG_SIZE);
+	if (qe->mregs)
+		sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
+	if (qe->qe_block)
+		sbus_free_consistent(qe->qe_sdev,
+				     PAGE_SIZE,
+				     qe->qe_block,
+				     qe->qblock_dvma);
+	if (qe->buffers)
+		sbus_free_consistent(qe->qe_sdev,
+				     sizeof(struct sunqe_buffers),
+				     qe->buffers,
+				     qe->buffers_dvma);
+
+	free_netdev(dev);
+
 	return res;
 }
 
-static int __init qec_match(struct sbus_dev *sdev)
+static int __devinit qec_sbus_probe(struct of_device *dev, const struct of_device_id *match)
 {
-	struct sbus_dev *sibling;
-	int i;
-
-	if (strcmp(sdev->prom_name, "qec") != 0)
-		return 0;
+	struct sbus_dev *sdev = to_sbus_device(&dev->dev);
 
-	/* QEC can be parent of either QuadEthernet or BigMAC
-	 * children.  Do not confuse this with qfe/SUNW,qfe
-	 * which is a quad-happymeal card and handled by
-	 * a different driver.
-	 */
-	sibling = sdev->child;
-	for (i = 0; i < 4; i++) {
-		if (sibling == NULL)
-			return 0;
-		if (strcmp(sibling->prom_name, "qe") != 0)
-			return 0;
-		sibling = sibling->next;
-	}
-	return 1;
+	return qec_ether_init(sdev);
 }
 
-static int __init qec_probe(void)
+static int __devexit qec_sbus_remove(struct of_device *dev)
 {
-	struct net_device *dev = NULL;
-	struct sbus_bus *bus;
-	struct sbus_dev *sdev = NULL;
-	static int called;
-	int cards = 0, v;
-
-	root_qec_dev = NULL;
-
-	if (called)
-		return -ENODEV;
-	called++;
-
-	for_each_sbus(bus) {
-		for_each_sbusdev(sdev, bus) {
-			if (cards)
-				dev = NULL;
-
-			if (qec_match(sdev)) {
-				cards++;
-				if ((v = qec_ether_init(dev, sdev)))
-					return v;
-			}
-		}
-	}
-	if (!cards)
-		return -ENODEV;
+	struct sunqe *qp = dev_get_drvdata(&dev->dev);
+	struct net_device *net_dev = qp->dev;
+
+	unregister_netdevice(net_dev);
+
+	sbus_iounmap(qp->qcregs, CREG_REG_SIZE);
+	sbus_iounmap(qp->mregs, MREGS_REG_SIZE);
+	sbus_free_consistent(qp->qe_sdev,
+			     PAGE_SIZE,
+			     qp->qe_block,
+			     qp->qblock_dvma);
+	sbus_free_consistent(qp->qe_sdev,
+			     sizeof(struct sunqe_buffers),
+			     qp->buffers,
+			     qp->buffers_dvma);
+
+	free_netdev(net_dev);
+
+	dev_set_drvdata(&dev->dev, NULL);
+
 	return 0;
 }
 
-static void __exit qec_cleanup(void)
+static struct of_device_id qec_sbus_match[] = {
+	{
+		.name = "qe",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, qec_sbus_match);
+
+static struct of_platform_driver qec_sbus_driver = {
+	.name = "qec",
+	.match_table = qec_sbus_match,
+	.probe = qec_sbus_probe,
+	.remove = __devexit_p(qec_sbus_remove),
+};
+
+static int __init qec_init(void)
+{
+	return of_register_driver(&qec_sbus_driver, &sbus_bus_type);
+}
+
+static void __exit qec_exit(void)
 {
-	struct sunqec *next_qec;
-	int i;
+	of_unregister_driver(&qec_sbus_driver);
 
 	while (root_qec_dev) {
-		next_qec = root_qec_dev->next_module;
+		struct sunqec *next = root_qec_dev->next_module;
 
-		/* Release all four QE channels, then the QEC itself. */
-		for (i = 0; i < 4; i++) {
-			unregister_netdev(root_qec_dev->qes[i]->dev);
-			sbus_iounmap(root_qec_dev->qes[i]->qcregs, CREG_REG_SIZE);
-			sbus_iounmap(root_qec_dev->qes[i]->mregs, MREGS_REG_SIZE);
-			sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
-					     PAGE_SIZE,
-					     root_qec_dev->qes[i]->qe_block,
-					     root_qec_dev->qes[i]->qblock_dvma);
-			sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
-					     sizeof(struct sunqe_buffers),
-					     root_qec_dev->qes[i]->buffers,
-					     root_qec_dev->qes[i]->buffers_dvma);
-			free_netdev(root_qec_dev->qes[i]->dev);
-		}
-		free_irq(root_qec_dev->qec_sdev->irqs[0], (void *)root_qec_dev);
+		free_irq(root_qec_dev->qec_sdev->irqs[0],
+			 (void *) root_qec_dev);
 		sbus_iounmap(root_qec_dev->gregs, GLOB_REG_SIZE);
+
 		kfree(root_qec_dev);
-		root_qec_dev = next_qec;
+
+		root_qec_dev = next;
 	}
 }
 
-module_init(qec_probe);
-module_exit(qec_cleanup);
+module_init(qec_init);
+module_exit(qec_exit);